diff --git a/techpack/video/Makefile b/techpack/video/Makefile
new file mode 100644
index 000000000000..6216c2266d9b
--- /dev/null
+++ b/techpack/video/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# auto-detect subdirs
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+ifeq ($(CONFIG_ARCH_LAHAINA), y)
+include $(srctree)/techpack/video/config/konavid.conf
+LINUXINCLUDE += -include $(srctree)/techpack/video/config/konavidconf.h
+endif
+
+# auto-detect subdirs
+ifeq ($(CONFIG_ARCH_HOLI), y)
+include $(srctree)/techpack/video/config/holivid.conf
+endif
+
+ifeq ($(CONFIG_ARCH_HOLI), y)
+LINUXINCLUDE += -include $(srctree)/techpack/video/config/holividconf.h
+endif
+
+# auto-detect subdirs
+ifeq ($(CONFIG_ARCH_LITO), y)
+include $(srctree)/techpack/video/config/litovid.conf
+endif
+
+ifeq ($(CONFIG_ARCH_LITO), y)
+LINUXINCLUDE += -include $(srctree)/techpack/video/config/litovidconf.h
+endif
+endif
+
+# auto-detect subdirs
+ifeq ($(CONFIG_ARCH_SCUBA), y)
+include $(srctree)/techpack/video/config/scubavid.conf
+endif
+
+ifeq ($(CONFIG_ARCH_SCUBA), y)
+LINUXINCLUDE += -include $(srctree)/techpack/video/config/scubavidconf.h
+endif
+
+LINUXINCLUDE += -I$(srctree)/techpack/video/include \
+	-I$(srctree)/techpack/video/include/uapi
+
+USERINCLUDE += -I$(srctree)/techpack/video/include/uapi
+
+obj-y += msm/
diff --git a/techpack/video/config/bengalvid.conf b/techpack/video/config/bengalvid.conf
new file mode 100644
index 000000000000..efb4eedfb73e
--- /dev/null
+++ b/techpack/video/config/bengalvid.conf
@@ -0,0 +1 @@
+export CONFIG_MSM_VIDC_V4L2=y
diff --git a/techpack/video/config/bengalvidconf.h b/techpack/video/config/bengalvidconf.h
new file mode 100644
index 000000000000..78d6c57a6920
--- /dev/null
+++ b/techpack/video/config/bengalvidconf.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */ + +#define CONFIG_MSM_VIDC_V4L2 1 diff --git a/techpack/video/config/holivid.conf b/techpack/video/config/holivid.conf new file mode 100644 index 000000000000..65eae99c590b --- /dev/null +++ b/techpack/video/config/holivid.conf @@ -0,0 +1,5 @@ +ifeq ($(CONFIG_QGKI),y) +export CONFIG_MSM_VIDC_V4L2=y +else +export CONFIG_MSM_VIDC_V4L2=m +endif diff --git a/techpack/video/config/holividconf.h b/techpack/video/config/holividconf.h new file mode 100644 index 000000000000..594a99490792 --- /dev/null +++ b/techpack/video/config/holividconf.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_MSM_VIDC_V4L2 1 diff --git a/techpack/video/config/konavid.conf b/techpack/video/config/konavid.conf new file mode 100644 index 000000000000..65eae99c590b --- /dev/null +++ b/techpack/video/config/konavid.conf @@ -0,0 +1,5 @@ +ifeq ($(CONFIG_QGKI),y) +export CONFIG_MSM_VIDC_V4L2=y +else +export CONFIG_MSM_VIDC_V4L2=m +endif diff --git a/techpack/video/config/konavidconf.h b/techpack/video/config/konavidconf.h new file mode 100644 index 000000000000..78d6c57a6920 --- /dev/null +++ b/techpack/video/config/konavidconf.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_MSM_VIDC_V4L2 1 diff --git a/techpack/video/config/litovid.conf b/techpack/video/config/litovid.conf new file mode 100644 index 000000000000..efb4eedfb73e --- /dev/null +++ b/techpack/video/config/litovid.conf @@ -0,0 +1 @@ +export CONFIG_MSM_VIDC_V4L2=y diff --git a/techpack/video/config/litovidconf.h b/techpack/video/config/litovidconf.h new file mode 100644 index 000000000000..78d6c57a6920 --- /dev/null +++ b/techpack/video/config/litovidconf.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_MSM_VIDC_V4L2 1 diff --git a/techpack/video/config/scubavid.conf b/techpack/video/config/scubavid.conf new file mode 100644 index 000000000000..65eae99c590b --- /dev/null +++ b/techpack/video/config/scubavid.conf @@ -0,0 +1,5 @@ +ifeq ($(CONFIG_QGKI),y) +export CONFIG_MSM_VIDC_V4L2=y +else +export CONFIG_MSM_VIDC_V4L2=m +endif diff --git a/techpack/video/config/scubavidconf.h b/techpack/video/config/scubavidconf.h new file mode 100644 index 000000000000..a31a91c7390e --- /dev/null +++ b/techpack/video/config/scubavidconf.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_MSM_VIDC_V4L2 1 diff --git a/techpack/video/include/uapi/vidc/media/msm_media_info.h b/techpack/video/include/uapi/vidc/media/msm_media_info.h new file mode 100644 index 000000000000..f9487c93ebeb --- /dev/null +++ b/techpack/video/include/uapi/vidc/media/msm_media_info.h @@ -0,0 +1,1395 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __MSM_MEDIA_INFO_H__ +#define __MSM_MEDIA_INFO_H__ + +/* Width and Height should be multiple of 16 */ +#define INTERLACE_WIDTH_MAX 1920 +#define INTERLACE_HEIGHT_MAX 1920 +#define INTERLACE_MB_PER_FRAME_MAX ((1920*1088)/256) + +#ifndef MSM_MEDIA_ALIGN +#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\ + ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\ + (((__sz) + (__align) - 1) & (~((__align) - 1)))) +#endif + +#ifndef MSM_MEDIA_ROUNDUP +#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r)) +#endif + +enum color_fmts { + /* Venus NV12: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . 
. . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . --> Buffer size alignment + * + * Y_Stride : Width aligned to 512 + * UV_Stride : Width aligned to 512 + * Y_Scanlines: Height aligned to 512 + * UV_Scanlines: Height/2 aligned to 256 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + COLOR_FMT_NV12, + /* Venus NV12: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . 
. . . . . . . . . . . . --> Buffer size alignment + * + * Y_Stride : Width aligned to 128 + * UV_Stride : Width aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + COLOR_FMT_NV12_128, + /* Venus NV21: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved V/U plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * V U V U V U V U V U V U . . . . ^ + * V U V U V U V U V U V U . . . . | + * V U V U V U V U V U V U . . . . | + * V U V U V U V U V U V U . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment + * + * Y_Stride : Width aligned to 512 + * UV_Stride : Width aligned to 512 + * Y_Scanlines: Height aligned to 512 + * UV_Scanlines: Height/2 aligned to 256 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + COLOR_FMT_NV21, + /* + * The buffer can be of 2 types: + * (1) Venus NV12 UBWC Progressive + * (2) Venus NV12 UBWC Interlaced + * + * (1) Venus NV12 UBWC Progressive Buffer Format: + * Compressed Macro-tile format for NV12. 
+ * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * + * Y_Stride = align(Width, 128) + * UV_Stride = align(Width, 128) + * Y_Scanlines = align(Height, 32) + * UV_Scanlines = align(Height/2, 32) + * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * + * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096) + * + * + * (2) Venus NV12 UBWC Interlaced Buffer Format: + * Compressed Macro-tile format for NV12 interlaced. 
+ * Contains 8 planes in the following order - + * (A) Y_Meta_Top_Field_Plane + * (B) Y_UBWC_Top_Field_Plane + * (C) UV_Meta_Top_Field_Plane + * (D) UV_UBWC_Top_Field_Plane + * (E) Y_Meta_Bottom_Field_Plane + * (F) Y_UBWC_Bottom_Field_Plane + * (G) UV_Meta_Bottom_Field_Plane + * (H) UV_UBWC_Bottom_Field_Plane + * Y_Meta_Top_Field_Plane consists of meta information to decode + * compressed tile data for Y_UBWC_Top_Field_Plane. + * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile + * format for top field of an interlaced frame. + * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together + * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed + * 8 bit Y samples for top field of an interlaced frame. + * + * UV_Meta_Top_Field_Plane consists of meta information to decode + * compressed tile data in UV_UBWC_Top_Field_Plane. + * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile + * format for top field of an interlaced frame. + * UBWC decoder block will use UV_Meta_Top_Field_Plane data together + * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed + * 8 bit subsampled color difference samples for top field of an + * interlaced frame. + * + * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is + * independently decodable and randomly accessible. There is no + * dependency between tiles. + * + * Y_Meta_Bottom_Field_Plane consists of meta information to decode + * compressed tile data for Y_UBWC_Bottom_Field_Plane. + * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile + * format for bottom field of an interlaced frame. + * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data + * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less + * uncompressed 8 bit Y samples for bottom field of an interlaced frame. + * + * UV_Meta_Bottom_Field_Plane consists of meta information to decode + * compressed tile data in UV_UBWC_Bottom_Field_Plane. 
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed + * macro-tile format for bottom field of an interlaced frame. + * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together + * with UV_UBWC_Bottom_Field_Plane data to produce loss-less + * uncompressed 8 bit subsampled color difference samples for bottom + * field of an interlaced frame. + * + * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is + * independently decodable and randomly accessible. There is no + * dependency between tiles. + * + * <-----Y_TF_Meta_Stride----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Half_height | + * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-Compressed tile Y_TF Stride-> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----UV_TF_Meta_Stride----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_TF_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * <-Compressed tile UV_TF Stride-> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-----Y_BF_Meta_Stride----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Half_height | + * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-Compressed tile Y_BF Stride-> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----UV_BF_Meta_Stride----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_BF_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-Compressed tile UV_BF Stride-> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . 
. | + * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * + * Half_height = (Height+1)>>1 + * Y_TF_Stride = align(Width, 128) + * UV_TF_Stride = align(Width, 128) + * Y_TF_Scanlines = align(Half_height, 32) + * UV_TF_Scanlines = align((Half_height+1)/2, 32) + * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096) + * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096) + * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16) + * Y_TF_Meta_Plane_size = + * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096) + * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16) + * UV_TF_Meta_Plane_size = + * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096) + * Y_BF_Stride = align(Width, 128) + * UV_BF_Stride = align(Width, 128) + * Y_BF_Scanlines = align(Half_height, 32) + * UV_BF_Scanlines = align((Half_height+1)/2, 32) + * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096) + * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096) + * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16) + * Y_BF_Meta_Plane_size = + * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096) + * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16) + * UV_BF_Meta_Plane_size = + * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096) + * + * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size + + * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size + + * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size + + * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size +, 4096) + */ + 
COLOR_FMT_NV12_UBWC, + /* Venus NV12 10-bit UBWC: + * Compressed Macro-tile format for NV12. + * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride -----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . 
. . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * + * + * Y_Stride = align(Width * 4/3, 256) + * UV_Stride = align(Width * 4/3, 256) + * Y_Scanlines = align(Height, 32) + * UV_Scanlines = align(Height/2, 16) + * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * + * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096) + */ + COLOR_FMT_NV12_BPP10_UBWC, + /* Venus RGBA8888 format: + * Contains 1 plane in the following order - + * (A) RGBA plane + * + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . 
| RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * + * RGB_Stride = align(Width * 4, 128) + * RGB_Scanlines = align(Height, 32) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * + * Total size = align(RGB_Plane_size , 4096) + */ + COLOR_FMT_RGBA8888, + /* Venus RGBA8888 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGBA plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . 
V + * + * RGB_Stride = align(Width * 4, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096) + */ + COLOR_FMT_RGBA8888_UBWC, + /* Venus RGBA1010102 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGBA plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . 
V + * + * RGB_Stride = align(Width * 4, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096) + */ + COLOR_FMT_RGBA1010102_UBWC, + /* Venus RGB565 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGB plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . 
V + * + * RGB_Stride = align(Width * 2, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096) + */ + COLOR_FMT_RGB565_UBWC, + /* P010 UBWC: + * Compressed Macro-tile format for NV12. + * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride -----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . 
V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * + * + * Y_Stride = align(Width * 2, 256) + * UV_Stride = align(Width * 2, 256) + * Y_Scanlines = align(Height, 16) + * UV_Scanlines = align(Height/2, 16) + * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * + * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096) + */ + COLOR_FMT_P010_UBWC, + /* Venus P010: + * YUV 4:2:0 image with a plane of 10 bit Y samples followed + * by an interleaved U/V plane containing 10 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
--> Buffer size alignment + * + * Y_Stride : Width * 2 aligned to 256 + * UV_Stride : Width * 2 aligned to 256 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + COLOR_FMT_P010, + /* Venus NV12_512: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
--> Buffer size alignment + * + * Y_Stride : Width aligned to 512 + * UV_Stride : Width aligned to 512 + * Y_Scanlines: Height aligned to 512 + * UV_Scanlines: Height/2 aligned to 256 + * Total size = align((Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines), 4096) + */ + COLOR_FMT_NV12_512, +}; + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int VENUS_Y_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + unsigned int alignment, stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV12: + case COLOR_FMT_NV21: + case COLOR_FMT_NV12_512: + alignment = 512; + stride = MSM_MEDIA_ALIGN(width, alignment); + break; + case COLOR_FMT_NV12_128: + case COLOR_FMT_NV12_UBWC: + alignment = 128; + stride = MSM_MEDIA_ALIGN(width, alignment); + break; + case COLOR_FMT_NV12_BPP10_UBWC: + alignment = 256; + stride = MSM_MEDIA_ALIGN(width, 192); + stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment); + break; + case COLOR_FMT_P010_UBWC: + case COLOR_FMT_P010: + alignment = 256; + stride = MSM_MEDIA_ALIGN(width * 2, alignment); + break; + default: + break; + } +invalid_input: + return stride; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int VENUS_UV_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + unsigned int alignment, stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV21: + case COLOR_FMT_NV12: + case COLOR_FMT_NV12_512: + alignment = 512; + stride = MSM_MEDIA_ALIGN(width, alignment); + break; + case COLOR_FMT_NV12_128: + case COLOR_FMT_NV12_UBWC: + alignment = 128; + stride = MSM_MEDIA_ALIGN(width, alignment); + break; + case COLOR_FMT_NV12_BPP10_UBWC: + alignment = 256; + stride = MSM_MEDIA_ALIGN(width, 192); + stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment); + break; + case COLOR_FMT_P010_UBWC: + case 
COLOR_FMT_P010: + alignment = 256; + stride = MSM_MEDIA_ALIGN(width * 2, alignment); + break; + default: + break; + } +invalid_input: + return stride; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int VENUS_Y_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + unsigned int alignment, sclines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV12: + case COLOR_FMT_NV21: + case COLOR_FMT_NV12_512: + alignment = 512; + break; + case COLOR_FMT_NV12_128: + case COLOR_FMT_NV12_UBWC: + case COLOR_FMT_P010: + alignment = 32; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + alignment = 16; + break; + default: + return 0; + } + sclines = MSM_MEDIA_ALIGN(height, alignment); +invalid_input: + return sclines; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int VENUS_UV_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + unsigned int alignment, sclines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV21: + case COLOR_FMT_NV12: + case COLOR_FMT_NV12_512: + alignment = 256; + break; + case COLOR_FMT_NV12_128: + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + case COLOR_FMT_P010: + alignment = 16; + break; + case COLOR_FMT_NV12_UBWC: + alignment = 32; + break; + default: + goto invalid_input; + } + + sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment); + +invalid_input: + return sclines; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int VENUS_Y_META_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + int y_tile_width = 0, y_meta_stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + case COLOR_FMT_P010_UBWC: + y_tile_width 
= 32; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + y_tile_width = 48; + break; + default: + goto invalid_input; + } + + y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width); + y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64); + +invalid_input: + return y_meta_stride; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int VENUS_Y_META_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + int y_tile_height = 0, y_meta_scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + y_tile_height = 8; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + y_tile_height = 4; + break; + default: + goto invalid_input; + } + + y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height); + y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16); + +invalid_input: + return y_meta_scanlines; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int VENUS_UV_META_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + int uv_tile_width = 0, uv_meta_stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + case COLOR_FMT_P010_UBWC: + uv_tile_width = 16; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + uv_tile_width = 24; + break; + default: + goto invalid_input; + } + + uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width); + uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64); + +invalid_input: + return uv_meta_stride; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int VENUS_UV_META_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + int uv_tile_height = 0, uv_meta_scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + 
uv_tile_height = 8; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + uv_tile_height = 4; + break; + default: + goto invalid_input; + } + + uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height); + uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16); + +invalid_input: + return uv_meta_scanlines; +} + +static inline unsigned int VENUS_RGB_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + unsigned int alignment = 0, stride = 0, bpp = 4; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888: + alignment = 128; + break; + case COLOR_FMT_RGB565_UBWC: + alignment = 256; + bpp = 2; + break; + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + alignment = 256; + break; + default: + goto invalid_input; + } + + stride = MSM_MEDIA_ALIGN(width * bpp, alignment); + +invalid_input: + return stride; +} + +static inline unsigned int VENUS_RGB_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + unsigned int alignment = 0, scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888: + alignment = 32; + break; + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + case COLOR_FMT_RGB565_UBWC: + alignment = 16; + break; + default: + goto invalid_input; + } + + scanlines = MSM_MEDIA_ALIGN(height, alignment); + +invalid_input: + return scanlines; +} + +static inline unsigned int VENUS_RGB_META_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + int rgb_tile_width = 0, rgb_meta_stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + case COLOR_FMT_RGB565_UBWC: + rgb_tile_width = 16; + break; + default: + goto invalid_input; + } + + rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width); + rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64); + +invalid_input: + return rgb_meta_stride; +} + +static inline unsigned int 
VENUS_RGB_META_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + int rgb_tile_height = 0, rgb_meta_scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + case COLOR_FMT_RGB565_UBWC: + rgb_tile_height = 4; + break; + default: + goto invalid_input; + } + + rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height); + rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16); + +invalid_input: + return rgb_meta_scanlines; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + * @height + * Progressive: height + * Interlaced: height + */ +static inline unsigned int VENUS_BUFFER_SIZE(unsigned int color_fmt, + unsigned int width, unsigned int height) +{ + unsigned int size = 0; + unsigned int y_plane, uv_plane, y_stride, + uv_stride, y_sclines, uv_sclines; + unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0; + unsigned int y_meta_stride = 0, y_meta_scanlines = 0; + unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0; + unsigned int y_meta_plane = 0, uv_meta_plane = 0; + unsigned int rgb_stride = 0, rgb_scanlines = 0; + unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0; + unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0; + + if (!width || !height) + goto invalid_input; + + y_stride = VENUS_Y_STRIDE(color_fmt, width); + uv_stride = VENUS_UV_STRIDE(color_fmt, width); + y_sclines = VENUS_Y_SCANLINES(color_fmt, height); + uv_sclines = VENUS_UV_SCANLINES(color_fmt, height); + rgb_stride = VENUS_RGB_STRIDE(color_fmt, width); + rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height); + + switch (color_fmt) { + case COLOR_FMT_NV21: + case COLOR_FMT_NV12: + case COLOR_FMT_P010: + case COLOR_FMT_NV12_512: + case COLOR_FMT_NV12_128: + y_plane = y_stride * y_sclines; + uv_plane = uv_stride * uv_sclines; + size = y_plane + uv_plane; + break; + case COLOR_FMT_NV12_UBWC: + y_meta_stride = 
VENUS_Y_META_STRIDE(color_fmt, width); + uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width); + if (width <= INTERLACE_WIDTH_MAX && + height <= INTERLACE_HEIGHT_MAX && + (height * width) / 256 <= INTERLACE_MB_PER_FRAME_MAX) { + y_sclines = + VENUS_Y_SCANLINES(color_fmt, (height+1)>>1); + y_ubwc_plane = + MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096); + uv_sclines = + VENUS_UV_SCANLINES(color_fmt, (height+1)>>1); + uv_ubwc_plane = + MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096); + y_meta_scanlines = + VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1); + y_meta_plane = MSM_MEDIA_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_scanlines = + VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1); + uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane)*2; + } else { + y_sclines = VENUS_Y_SCANLINES(color_fmt, height); + y_ubwc_plane = + MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096); + uv_sclines = VENUS_UV_SCANLINES(color_fmt, height); + uv_ubwc_plane = + MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096); + y_meta_scanlines = + VENUS_Y_META_SCANLINES(color_fmt, height); + y_meta_plane = MSM_MEDIA_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_scanlines = + VENUS_UV_META_SCANLINES(color_fmt, height); + uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane); + } + break; + case COLOR_FMT_NV12_BPP10_UBWC: + y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096); + uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096); + y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width); + y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height); + y_meta_plane = MSM_MEDIA_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width); + uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height); + uv_meta_plane = 
MSM_MEDIA_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane; + break; + case COLOR_FMT_P010_UBWC: + y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096); + uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096); + y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width); + y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height); + y_meta_plane = MSM_MEDIA_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width); + uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height); + uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane; + break; + case COLOR_FMT_RGBA8888: + rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096); + size = rgb_plane; + break; + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + case COLOR_FMT_RGB565_UBWC: + rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, + 4096); + rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width); + rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt, + height); + rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride * + rgb_meta_scanlines, 4096); + size = rgb_ubwc_plane + rgb_meta_plane; + break; + default: + break; + } +invalid_input: + return MSM_MEDIA_ALIGN(size, 4096); +} + +static inline unsigned int VENUS_BUFFER_SIZE_USED(unsigned int color_fmt, + unsigned int width, unsigned int height, unsigned int interlace) +{ + unsigned int size = 0; + unsigned int y_stride, uv_stride, y_sclines, uv_sclines; + unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0; + unsigned int y_meta_stride = 0, y_meta_scanlines = 0; + unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0; + unsigned int y_meta_plane = 0, uv_meta_plane = 0; + + if (!width || !height) + goto invalid_input; + + if (!interlace && color_fmt == COLOR_FMT_NV12_UBWC) { + y_stride = 
VENUS_Y_STRIDE(color_fmt, width); + uv_stride = VENUS_UV_STRIDE(color_fmt, width); + y_sclines = VENUS_Y_SCANLINES(color_fmt, height); + y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096); + uv_sclines = VENUS_UV_SCANLINES(color_fmt, height); + uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096); + y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width); + y_meta_scanlines = + VENUS_Y_META_SCANLINES(color_fmt, height); + y_meta_plane = MSM_MEDIA_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width); + uv_meta_scanlines = + VENUS_UV_META_SCANLINES(color_fmt, height); + uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane); + size = MSM_MEDIA_ALIGN(size, 4096); + } else { + size = VENUS_BUFFER_SIZE(color_fmt, width, height); + } +invalid_input: + return size; +} + +#endif diff --git a/techpack/video/include/uapi/vidc/media/msm_vidc_utils.h b/techpack/video/include/uapi/vidc/media/msm_vidc_utils.h new file mode 100644 index 000000000000..41f9c88a7dce --- /dev/null +++ b/techpack/video/include/uapi/vidc/media/msm_vidc_utils.h @@ -0,0 +1,653 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
 + */ + +#ifndef __MSM_VIDC_UTILS_H__ +#define __MSM_VIDC_UTILS_H__ + +#include <linux/types.h> +#include <linux/videodev2.h> + +/* vendor color format start */ +/* UBWC 8-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_UBWC v4l2_fourcc('Q', '1', '2', '8') +/* NV12_512 8-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_512 v4l2_fourcc('Q', '5', '1', '2') +/* NV12_128 8-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_128 v4l2_fourcc('N', '1', '2', '8') +/* NV12 10-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_P010_UBWC v4l2_fourcc('Q', '1', '2', 'B') +/* UBWC 10-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_TP10_UBWC v4l2_fourcc('Q', '1', '2', 'A') +#define V4L2_PIX_FMT_RGBA8888_UBWC v4l2_fourcc('Q', 'R', 'G', 'B') +#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS \ + v4l2_fourcc('Q', 'P', '1', '0') /* Y/CbCr 4:2:0 P10 Venus*/ +/* vendor color format end */ + +/* Vendor buffer flags start */ +#define V4L2_BUF_FLAG_CODECCONFIG 0x01000000 +#define V4L2_BUF_FLAG_END_OF_SUBFRAME 0x02000000 +#define V4L2_BUF_FLAG_DATA_CORRUPT 0x04000000 +#define V4L2_BUF_INPUT_UNSUPPORTED 0x08000000 +#define V4L2_BUF_FLAG_EOS 0x10000000 +#define V4L2_BUF_FLAG_READONLY 0x20000000 +#define V4L2_BUF_FLAG_PERF_MODE 0x40000000 +#define V4L2_BUF_FLAG_CVPMETADATA_SKIP 0x80000000 +/* Vendor buffer flags end */ + +/* Vendor commands start */ +#define V4L2_CMD_FLUSH (4) +/* flags for flush cmd */ +#define V4L2_CMD_FLUSH_OUTPUT (1 << 0) +#define V4L2_CMD_FLUSH_CAPTURE (1 << 1) +/* Vendor commands end */ + +/* Vendor events start */ +#define V4L2_EVENT_MSM_VIDC_START \ + (V4L2_EVENT_PRIVATE_START + 0x00001000) +#define V4L2_EVENT_MSM_VIDC_FLUSH_DONE \ + (V4L2_EVENT_MSM_VIDC_START + 1) +#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT \ + (V4L2_EVENT_MSM_VIDC_START + 2) +#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT \ + (V4L2_EVENT_MSM_VIDC_START + 3) +#define V4L2_EVENT_MSM_VIDC_SYS_ERROR \ + (V4L2_EVENT_MSM_VIDC_START + 5) +#define V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE \ + (V4L2_EVENT_MSM_VIDC_START + 
6) +#define V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER \ + (V4L2_EVENT_MSM_VIDC_START + 7) +#define V4L2_EVENT_MSM_VIDC_HW_OVERLOAD \ + (V4L2_EVENT_MSM_VIDC_START + 8) +#define V4L2_EVENT_MSM_VIDC_MAX_CLIENTS \ + (V4L2_EVENT_MSM_VIDC_START + 9) +#define V4L2_EVENT_MSM_VIDC_HW_UNSUPPORTED \ + (V4L2_EVENT_MSM_VIDC_START + 10) +/* Vendor events end */ + +/* missing v4l2 entries start */ +enum v4l2_mpeg_vidc_video_bitrate_mode { + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR = + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR + 1, + V4L2_MPEG_VIDEO_BITRATE_MODE_MBR, + V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR, + V4L2_MPEG_VIDEO_BITRATE_MODE_CQ, +}; +/* missing v4l2 entries end */ + +/* vendor controls start */ +#define V4L2_CID_MPEG_MSM_VIDC_BASE (V4L2_CTRL_CLASS_MPEG | 0x2000) + +#define V4L2_MPEG_MSM_VIDC_DISABLE 0 +#define V4L2_MPEG_MSM_VIDC_ENABLE 1 + +#define V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE+0) +enum v4l2_mpeg_vidc_video_pictype_dec_mode { + V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_I = 1, + V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_P = 2, + V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_B = 4, +}; + +#define V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT \ + (V4L2_CID_MPEG_MSM_VIDC_BASE+2) +enum v4l2_mpeg_vidc_video_stream_format { + V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES = 0, + V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_FOUR_BYTE_LENGTH = 4, +}; + +#define V4L2_CID_MPEG_VIDC_VIDEO_DECODE_ORDER \ + (V4L2_CID_MPEG_MSM_VIDC_BASE+3) +#define V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM \ + (V4L2_CID_MPEG_MSM_VIDC_BASE+13) +#define V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 14) +#define V4L2_CID_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 15) +#define V4L2_CID_MPEG_VIDC_VIDEO_SECURE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE+16) +#define V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 17) +enum v4l2_mpeg_vidc_extradata { + EXTRADATA_NONE = 0, + EXTRADATA_DEFAULT = 1, + EXTRADATA_ADVANCED = 2, + EXTRADATA_ENC_INPUT_ROI = 4, 
+ EXTRADATA_ENC_INPUT_HDR10PLUS = 8, + EXTRADATA_ENC_INPUT_CVP = 16, + EXTRADATA_ENC_FRAME_QP = 32, + EXTRADATA_ENC_INPUT_CROP = 64, +}; +#define V4L2_CID_MPEG_VIDC_VIDEO_VUI_TIMING_INFO \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 19) +#define V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 20) +enum v4l2_mpeg_vidc_video_vp8_profile_level { + V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED, + V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0, + V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1, + V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_2, + V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3, +}; +#define V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL \ + (V4L2_CID_MPEG_MSM_VIDC_BASE+23) +enum v4l2_mpeg_vidc_video_mpeg2_level { + V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0 = 0, + V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_1 = 1, + V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2 = 2, + V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_3 = 3, +}; +#define V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE+24) +enum v4l2_mpeg_vidc_video_mpeg2_profile { + V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE = 0, + V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN = 1, + V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_HIGH = 2, +}; +#define V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 27) +#define V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 28) +#define V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 29) +#define V4L2_CID_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 38) +#define V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 39) +#define V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 47) +#define V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 52) +#define V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 53) +#define V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 55) +#define V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 56) +#define 
V4L2_CID_MPEG_VIDC_VIDEO_BLUR_DIMENSIONS \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 57) +#define V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 60) +#define V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 61) +#define V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 62) +#define V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 63) +#define V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 67) +enum v4l2_mpeg_vidc_video_vp9_level { + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED = 0, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_1 = 1, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_11 = 2, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_2 = 3, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_21 = 4, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_3 = 5, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_31 = 6, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_4 = 7, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_41 = 8, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5 = 9, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51 = 10, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6 = 11, + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61 = 12, +}; +#define V4L2_CID_MPEG_VIDC_VIDEO_DYN_QP \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 108) +#define V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 109) +#define V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 110) +#define V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_CUSTOM_MATRIX \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 114) +#define V4L2_CID_MPEG_VIDC_VENC_HDR_INFO \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 116) +#define V4L2_CID_MPEG_VIDC_IMG_GRID_SIZE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 117) +#define V4L2_CID_MPEG_VIDC_COMPRESSION_QUALITY \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 118) +#define V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 119) +#define V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 120) +enum v4l2_mpeg_vidc_video_hevc_max_hier_coding_layer { + V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0 = 0, 
+ V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_1 = 1, + V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_2 = 2, + V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_3 = 3, + V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_4 = 4, + V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_5 = 5, + V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_6 = 6, +}; +#define V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 121) +#define V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 122) +#define V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 123) +#define V4L2_CID_MPEG_VIDC_SUPERFRAME \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 124) +#define V4L2_CID_MPEG_VIDC_CAPTURE_FRAME_RATE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 125) +#define V4L2_CID_MPEG_VIDC_CVP_FRAME_RATE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 126) +#define V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 127) +enum v4l2_mpeg_vidc_video_stream_output_mode { + V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY = 0, + V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY = 1, +}; +#define V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 128) +enum v4l2_mpeg_vidc_video_roi_type { + V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_NONE = 0, + V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BIT = 1, + V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE = 2, +}; +#define V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 131) +enum v4l2_mpeg_vidc_video_bitrate_savings_type { + V4L2_MPEG_VIDC_VIDEO_BRS_DISABLE = 0, + V4L2_MPEG_VIDC_VIDEO_BRS_ENABLE_8BIT = 1, + V4L2_MPEG_VIDC_VIDEO_BRS_ENABLE_10BIT = 2, + V4L2_MPEG_VIDC_VIDEO_BRS_ENABLE_ALL = 3, +}; +#define V4L2_CID_MPEG_VIDC_VENC_BITRATE_BOOST \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 132) +#define V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_HINT \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 133) +#define V4L2_CID_MPEG_VIDC_VDEC_HEIF_MODE \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 134) +#define 
V4L2_CID_MPEG_VIDC_VENC_QPRANGE_BOOST \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 135) +#define V4L2_CID_MPEG_VIDC_VIDEO_DISABLE_TIMESTAMP_REORDER \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 136) +#define V4L2_CID_MPEG_VIDC_VENC_COMPLEXITY \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 137) + +#define V4L2_CID_MPEG_VIDC_VIDEO_UNKNOWN \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 0xFFF) +/* vendor controls end */ + +#define MSM_VIDC_EXTRADATA_NONE 0x00000000 +struct msm_vidc_extradata_header { + __u32 size; + __u32 version; /** Keeping binary compatibility */ + __u32 port_index; /* with firmware and OpenMAX IL **/ + __u32 type; /* msm_vidc_extradata_type */ + __u32 data_size; + __u32 data[1]; +}; + +/* msm_vidc_interlace_type */ +#define MSM_VIDC_INTERLACE_FRAME_PROGRESSIVE 0x01 +#define MSM_VIDC_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST 0x02 +#define MSM_VIDC_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04 +#define MSM_VIDC_INTERLACE_FRAME_TOPFIELDFIRST 0x08 +#define MSM_VIDC_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10 +#define MSM_VIDC_INTERLACE_FRAME_MBAFF 0x20 +/* Color formats */ +#define MSM_VIDC_HAL_INTERLACE_COLOR_FORMAT_NV12 0x2 +#define MSM_VIDC_HAL_INTERLACE_COLOR_FORMAT_NV12_UBWC 0x8002 +#define MSM_VIDC_EXTRADATA_INTERLACE_VIDEO 0x00000002 +struct msm_vidc_interlace_payload { + __u32 format; /* Interlace format */ + __u32 color_format; +}; + +#define MSM_VIDC_EXTRADATA_FRAME_RATE 0x00000007 +struct msm_vidc_framerate_payload { + __u32 frame_rate; /*In Q16 format */ +}; + +#define MSM_VIDC_EXTRADATA_TIMESTAMP 0x00000005 +struct msm_vidc_ts_payload { + __u32 timestamp_lo; + __u32 timestamp_hi; +}; + +#define MSM_VIDC_EXTRADATA_NUM_CONCEALED_MB 0x7F100001 +struct msm_vidc_concealmb_payload { + __u32 num_mbs; +}; + + +#define MSM_VIDC_FRAME_RECONSTRUCTION_INCORRECT 0x0 +#define MSM_VIDC_FRAME_RECONSTRUCTION_CORRECT 0x01 +#define MSM_VIDC_FRAME_RECONSTRUCTION_APPROXIMATELY_CORRECT 0x02 +#define MSM_VIDC_EXTRADATA_RECOVERY_POINT_SEI 0x00000009 +struct msm_vidc_recoverysei_payload { + __u32 flags; +}; + 
+#define MSM_VIDC_EXTRADATA_ASPECT_RATIO 0x7F100003 +struct msm_vidc_aspect_ratio_payload { + __u32 size; + __u32 version; + __u32 port_index; + __u32 aspect_width; + __u32 aspect_height; +}; + +#define MSM_VIDC_EXTRADATA_INPUT_CROP 0x0700000E +struct msm_vidc_input_crop_payload { + __u32 size; + __u32 version; + __u32 port_index; + __u32 left; + __u32 top; + __u32 width; + __u32 height; +}; + +struct msm_vidc_misr_info { + __u32 misr_set; + __u32 misr_dpb_luma[8]; + __u32 misr_dpb_chroma[8]; + __u32 misr_opb_luma[8]; + __u32 misr_opb_chroma[8]; +}; +#define MSM_VIDC_EXTRADATA_OUTPUT_CROP 0x0700000F +struct msm_vidc_output_crop_payload { + __u32 size; + __u32 version; + __u32 port_index; + __u32 left; + __u32 top; + __u32 display_width; + __u32 display_height; + __u32 width; + __u32 height; + __u32 frame_num; + __u32 bit_depth_y; + __u32 bit_depth_c; + struct msm_vidc_misr_info misr_info[2]; +}; + +#define MSM_VIDC_EXTRADATA_INDEX 0x7F100002 +struct msm_vidc_extradata_index { + __u32 type; + union { + struct msm_vidc_input_crop_payload input_crop; + struct msm_vidc_aspect_ratio_payload aspect_ratio; + }; +}; + +#define MSM_VIDC_EXTRADATA_PANSCAN_WINDOW 0x00000008 +struct msm_vidc_panscan_window { + __u32 panscan_height_offset; + __u32 panscan_width_offset; + __u32 panscan_window_width; + __u32 panscan_window_height; +}; + +struct msm_vidc_panscan_window_payload { + __u32 num_panscan_windows; + struct msm_vidc_panscan_window wnd[1]; +}; + +#define MSM_VIDC_USERDATA_TYPE_FRAME 0x1 +#define MSM_VIDC_USERDATA_TYPE_TOP_FIELD 0x2 +#define MSM_VIDC_USERDATA_TYPE_BOTTOM_FIELD 0x3 +#define MSM_VIDC_EXTRADATA_STREAM_USERDATA 0x0000000E +struct msm_vidc_stream_userdata_payload { + __u32 type; + __u32 data[1]; +}; + +#define MSM_VIDC_EXTRADATA_FRAME_QP 0x0000000F +struct msm_vidc_frame_qp_payload { + __u32 frame_qp; + __u32 qp_sum; + __u32 skip_qp_sum; + __u32 skip_num_blocks; + __u32 total_num_blocks; +}; + +#define MSM_VIDC_EXTRADATA_FRAME_BITS_INFO 0x00000010 +struct 
msm_vidc_frame_bits_info_payload { + __u32 frame_bits; + __u32 header_bits; +}; + +#define MSM_VIDC_EXTRADATA_S3D_FRAME_PACKING 0x00000006 +struct msm_vidc_s3d_frame_packing_payload { + __u32 fpa_id; + __u32 cancel_flag; + __u32 fpa_type; + __u32 quin_cunx_flag; + __u32 content_interprtation_type; + __u32 spatial_flipping_flag; + __u32 frame0_flipped_flag; + __u32 field_views_flag; + __u32 current_frame_is_frame0_flag; + __u32 frame0_self_contained_flag; + __u32 frame1_self_contained_flag; + __u32 frame0_graid_pos_x; + __u32 frame0_graid_pos_y; + __u32 frame1_graid_pos_x; + __u32 frame1_graid_pos_y; + __u32 fpa_reserved_byte; + __u32 fpa_repetition_period; + __u32 fpa_extension_flag; +}; + +#define MSM_VIDC_EXTRADATA_ROI_QP 0x00000013 +struct msm_vidc_roi_deltaqp_payload { + __u32 b_roi_info; /*Enable/Disable*/ + __u32 mbi_info_size; /*Size of QP data*/ + __u32 data[1]; +}; + +struct msm_vidc_roi_qp_payload { + __s32 upper_qp_offset; + __s32 lower_qp_offset; + __u32 b_roi_info; + __u32 mbi_info_size; + __u32 data[1]; +}; + +#define MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI 0x00000015 +struct msm_vidc_mastering_display_colour_sei_payload { + __u32 nDisplayPrimariesX[3]; + __u32 nDisplayPrimariesY[3]; + __u32 nWhitePointX; + __u32 nWhitePointY; + __u32 nMaxDisplayMasteringLuminance; + __u32 nMinDisplayMasteringLuminance; +}; + +#define MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI 0x00000016 +struct msm_vidc_content_light_level_sei_payload { + __u32 nMaxContentLight; + __u32 nMaxPicAverageLight; +}; + +#define MSM_VIDC_EXTRADATA_HDR10PLUS_METADATA 0x0000001A +struct msm_vidc_hdr10plus_metadata_payload { + __u32 size; + __u32 data[1]; +}; + +#define MSM_VIDC_EXTRADATA_CVP_METADATA 0x0000001B +struct msm_vidc_enc_cvp_metadata_payload { + __u32 data[256]; +}; + +/* video_format */ +#define MSM_VIDC_COMPONENT 0 +#define MSM_VIDC_PAL 1 +#define MSM_VIDC_NTSC 2 +#define MSM_VIDC_SECAM 3 +#define MSM_VIDC_MAC 4 +#define MSM_VIDC_UNSPECIFIED_FORMAT 5 +#define 
MSM_VIDC_RESERVED_1_FORMAT 6 +#define MSM_VIDC_RESERVED_2_FORMAT 7 + +/* See colour_primaries of ISO/IEC 14496 for significance */ +/* color_primaries values */ +#define MSM_VIDC_RESERVED_1 0 +#define MSM_VIDC_BT709_5 1 +#define MSM_VIDC_UNSPECIFIED 2 +#define MSM_VIDC_RESERVED_2 3 +#define MSM_VIDC_BT470_6_M 4 +#define MSM_VIDC_BT601_6_625 5 +#define MSM_VIDC_BT470_6_BG MSM_VIDC_BT601_6_625 +#define MSM_VIDC_BT601_6_525 6 +#define MSM_VIDC_SMPTE_240M 7 +#define MSM_VIDC_GENERIC_FILM 8 +#define MSM_VIDC_BT2020 9 + +/* matrix_coeffs values */ +#define MSM_VIDC_MATRIX_RGB 0 +#define MSM_VIDC_MATRIX_BT_709_5 1 +#define MSM_VIDC_MATRIX_UNSPECIFIED 2 +#define MSM_VIDC_MATRIX_RESERVED 3 +#define MSM_VIDC_MATRIX_FCC_47 4 +#define MSM_VIDC_MATRIX_601_6_625 5 +#define MSM_VIDC_MATRIX_BT470_BG MSM_VIDC_MATRIX_601_6_625 +#define MSM_VIDC_MATRIX_601_6_525 6 +#define MSM_VIDC_MATRIX_SMPTE_170M MSM_VIDC_MATRIX_601_6_525 +#define MSM_VIDC_MATRIX_SMPTE_240M 7 +#define MSM_VIDC_MATRIX_Y_CG_CO 8 +#define MSM_VIDC_MATRIX_BT_2020 9 +#define MSM_VIDC_MATRIX_BT_2020_CONST 10 + +/* transfer_char values */ +#define MSM_VIDC_TRANSFER_RESERVED_1 0 +#define MSM_VIDC_TRANSFER_BT709_5 1 +#define MSM_VIDC_TRANSFER_UNSPECIFIED 2 +#define MSM_VIDC_TRANSFER_RESERVED_2 3 +#define MSM_VIDC_TRANSFER_BT_470_6_M 4 +#define MSM_VIDC_TRANSFER_BT_470_6_BG 5 +#define MSM_VIDC_TRANSFER_601_6_625 6 +#define MSM_VIDC_TRANSFER_601_6_525 MSM_VIDC_TRANSFER_601_6_625 +#define MSM_VIDC_TRANSFER_SMPTE_240M 7 +#define MSM_VIDC_TRANSFER_LINEAR 8 +#define MSM_VIDC_TRANSFER_LOG_100_1 9 +#define MSM_VIDC_TRANSFER_LOG_100_SQRT10_1 10 +#define MSM_VIDC_TRANSFER_IEC_61966 11 +#define MSM_VIDC_TRANSFER_BT_1361 12 +#define MSM_VIDC_TRANSFER_SRGB 13 +#define MSM_VIDC_TRANSFER_BT_2020_10 14 +#define MSM_VIDC_TRANSFER_BT_2020_12 15 +#define MSM_VIDC_TRANSFER_SMPTE_ST2084 16 +#define MSM_VIDC_TRANSFER_SMPTE_ST428_1 17 +#define MSM_VIDC_TRANSFER_HLG 18 + +#define MSM_VIDC_EXTRADATA_VUI_DISPLAY_INFO 0x7F100006 +struct 
msm_vidc_vui_display_info_payload { + __u32 video_signal_present_flag; + __u32 video_format; + __u32 bit_depth_y; + __u32 bit_depth_c; + __u32 video_full_range_flag; + __u32 color_description_present_flag; + __u32 color_primaries; + __u32 transfer_char; + __u32 matrix_coeffs; + __u32 chroma_location_info_present_flag; + __u32 chroma_format_idc; + __u32 separate_color_plane_flag; + __u32 chroma_sample_loc_type_top_field; + __u32 chroma_sample_loc_type_bottom_field; +}; + +#define MSM_VIDC_EXTRADATA_HDR_HIST 0x7F100008 +struct msm_vidc_extradata_hdr_hist_payload { + __u32 value_count[1024]; +}; + +#define MSM_VIDC_EXTRADATA_MPEG2_SEQDISP 0x0000000D +struct msm_vidc_mpeg2_seqdisp_payload { + __u32 video_format; + __u32 color_descp; + __u32 color_primaries; + __u32 transfer_char; + __u32 matrix_coeffs; + __u32 disp_width; + __u32 disp_height; +}; + +/* VPx color_space values */ +#define MSM_VIDC_CS_UNKNOWN 0 +#define MSM_VIDC_CS_BT_601 1 +#define MSM_VIDC_CS_BT_709 2 +#define MSM_VIDC_CS_SMPTE_170 3 +#define MSM_VIDC_CS_SMPTE_240 4 +#define MSM_VIDC_CS_BT_2020 5 +#define MSM_VIDC_CS_RESERVED 6 +#define MSM_VIDC_CS_RGB 7 +#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO 0x00000014 +struct msm_vidc_vpx_colorspace_payload { + __u32 color_space; + __u32 yuv_range_flag; + __u32 sumsampling_x; + __u32 sumsampling_y; +}; + +#define MSM_VIDC_EXTRADATA_METADATA_LTRINFO 0x7F100004 +/* Don't use the #define below. 
It is to bypass checkpatch */ +#define LTRINFO MSM_VIDC_EXTRADATA_METADATA_LTRINFO +struct msm_vidc_metadata_ltr_payload { + __u32 ltr_use_mark; +}; + +/* ptr[2]: event_notify: pixel_depth */ +#define MSM_VIDC_BIT_DEPTH_8 0 +#define MSM_VIDC_BIT_DEPTH_10 1 +#define MSM_VIDC_BIT_DEPTH_UNSUPPORTED 0XFFFFFFFF + +/* ptr[3]: event_notify: pic_struct */ +#define MSM_VIDC_PIC_STRUCT_MAYBE_INTERLACED 0x0 +#define MSM_VIDC_PIC_STRUCT_PROGRESSIVE 0x1 + +/*default when layer ID isn't specified*/ +#define MSM_VIDC_ALL_LAYER_ID 0xFF + +static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height) +{ + (void)height; + (void)width; + + /* + * In the future, calculate the size based on the w/h but just + * hardcode it for now since 16K satisfies all current usecases. + */ + return 16 * 1024; +} + +/* V4L2_CID_MPEG_VIDC_VENC_HDR_INFO payload index */ +enum msm_vidc_hdr_info_types { + MSM_VIDC_RGB_PRIMARY_00, + MSM_VIDC_RGB_PRIMARY_01, + MSM_VIDC_RGB_PRIMARY_10, + MSM_VIDC_RGB_PRIMARY_11, + MSM_VIDC_RGB_PRIMARY_20, + MSM_VIDC_RGB_PRIMARY_21, + MSM_VIDC_WHITEPOINT_X, + MSM_VIDC_WHITEPOINT_Y, + MSM_VIDC_MAX_DISP_LUM, + MSM_VIDC_MIN_DISP_LUM, + MSM_VIDC_RGB_MAX_CLL, + MSM_VIDC_RGB_MAX_FLL, +}; + +enum msm_vidc_plane_reserved_field_types { + MSM_VIDC_BUFFER_FD, + MSM_VIDC_DATA_OFFSET, + MSM_VIDC_COMP_RATIO, + MSM_VIDC_INPUT_TAG_1, + MSM_VIDC_INPUT_TAG_2, + MSM_VIDC_FRAMERATE, +}; + +enum msm_vidc_cb_event_types { + MSM_VIDC_HEIGHT, + MSM_VIDC_WIDTH, + MSM_VIDC_BIT_DEPTH, + MSM_VIDC_PIC_STRUCT, + MSM_VIDC_COLOR_SPACE, + MSM_VIDC_FW_MIN_COUNT, +}; +#endif diff --git a/techpack/video/msm/Makefile b/techpack/video/msm/Makefile new file mode 100644 index 000000000000..6aeea18bb290 --- /dev/null +++ b/techpack/video/msm/Makefile @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y += -I$(srctree)/techpack/video/msm/vidc/ \ + -I$(srctree)/drivers/devfreq/ + +msm-vidc-objs := vidc/msm_v4l2_vidc.o \ + vidc/msm_vidc_platform.o \ + vidc/msm_vidc_common.o \ + 
vidc/msm_vidc.o \ + vidc/msm_vdec.o \ + vidc/msm_venc.o \ + vidc/msm_smem.o \ + vidc/msm_vidc_debug.o \ + vidc/msm_vidc_res_parse.o \ + vidc/hfi_common.o \ + vidc/hfi_ar50_lt.o \ + vidc/hfi_iris2.o \ + vidc/hfi_response_handler.o \ + vidc/hfi_packetization.o \ + vidc/vidc_hfi.o \ + vidc/msm_vidc_clocks.o \ + vidc/msm_vidc_bus_ar50lite.o\ + vidc/msm_vidc_bus_iris2.o \ + vidc/msm_vidc_buffer_calculations.o + +obj-$(CONFIG_MSM_VIDC_V4L2) := msm-vidc.o + diff --git a/techpack/video/msm/vidc/fixedpoint.h b/techpack/video/msm/vidc/fixedpoint.h new file mode 100644 index 000000000000..6a28ed839f68 --- /dev/null +++ b/techpack/video/msm/vidc/fixedpoint.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + */ + +#ifdef _FIXP_ARITH_H +#error "This implementation is meant to override fixp-arith.h, don't use both" +#endif + +#ifndef _FIXEDPOINT_H_ +#define _FIXEDPOINT_H_ + +#include +#include + +/* + * Normally would typedef'ed, but checkpatch doesn't like typedef. + * Also should be normally typedef'ed to intmax_t but that doesn't seem to be + * available in the kernel + */ +#define fp_t size_t + +/* (Arbitrarily) make the first 25% of the bits to be the fractional bits */ +#define FP_FRACTIONAL_BITS ((sizeof(fp_t) * 8) / 4) + +#define FP(__i, __f_n, __f_d) \ + ((((fp_t)(__i)) << FP_FRACTIONAL_BITS) + \ + (((__f_n) << FP_FRACTIONAL_BITS) / (__f_d))) + +#define FP_INT(__i) FP(__i, 0, 1) +#define FP_ONE FP_INT(1) +#define FP_ZERO FP_INT(0) + +static inline size_t fp_frac_base(void) +{ + return GENMASK(FP_FRACTIONAL_BITS - 1, 0); +} + +static inline size_t fp_frac(fp_t a) +{ + return a & GENMASK(FP_FRACTIONAL_BITS - 1, 0); +} + +static inline size_t fp_int(fp_t a) +{ + return a >> FP_FRACTIONAL_BITS; +} + +static inline size_t fp_round(fp_t a) +{ + /* is the fractional part >= frac_max / 2? 
*/ + bool round_up = fp_frac(a) >= fp_frac_base() / 2; + + return fp_int(a) + round_up; +} + +static inline fp_t fp_mult(fp_t a, fp_t b) +{ + return (a * b) >> FP_FRACTIONAL_BITS; +} + + +static inline fp_t fp_div(fp_t a, fp_t b) +{ + return (a << FP_FRACTIONAL_BITS) / b; +} + +#endif diff --git a/techpack/video/msm/vidc/hfi_ar50_lt.c b/techpack/video/msm/vidc/hfi_ar50_lt.c new file mode 100644 index 000000000000..85b4909342e7 --- /dev/null +++ b/techpack/video/msm/vidc/hfi_ar50_lt.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#include "msm_vidc_debug.h" +#include "hfi_common.h" + +#define VIDC_CPU_BASE_OFFS_AR50_LT 0x000A0000 +#define VIDEO_GCC_BASE_OFFS_AR50_LT 0x00000000 +#define VIDEO_CC_BASE_OFFS_AR50_LT 0x00100000 + +#define VIDC_CPU_CS_BASE_OFFS_AR50_LT (VIDC_CPU_BASE_OFFS_AR50_LT) +#define VIDC_CPU_IC_BASE_OFFS_AR50_LT (VIDC_CPU_BASE_OFFS_AR50_LT) + +#define VIDC_CPU_CS_A2HSOFTINTCLR_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x1C) +#define VIDC_CPU_CS_VMIMSG_AR50_LTi (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x34) +#define VIDC_CPU_CS_VMIMSGAG0_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x38) +#define VIDC_CPU_CS_VMIMSGAG1_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x3C) +#define VIDC_CPU_CS_VMIMSGAG2_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x40) +#define VIDC_CPU_CS_VMIMSGAG3_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x44) +#define VIDC_CPU_CS_SCIACMD_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x48) + +/* HFI_CTRL_STATUS */ +#define VIDC_CPU_CS_SCIACMDARG0_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x4C) +#define VIDC_CPU_CS_SCIACMDARG0_BMSK_AR50_LT 0xff +#define VIDC_CPU_CS_SCIACMDARG0_SHFT_AR50_LT 0x0 +#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK_AR50_LT 0xfe +#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_SHFT_AR50_LT 0x1 +#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK_AR50_LT 0x1 +#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_SHFT_AR50_LT 
0x0 +#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY_AR50_LT 0x100 +#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK_AR50_LT 0x40000000 + +/* HFI_QTBL_INFO */ +#define VIDC_CPU_CS_SCIACMDARG1_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x50) + +/* HFI_QTBL_ADDR */ +#define VIDC_CPU_CS_SCIACMDARG2_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x54) + +/* HFI_VERSION_INFO */ +#define VIDC_CPU_CS_SCIACMDARG3_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x58) + +/* VIDC_SFR_ADDR */ +#define VIDC_CPU_CS_SCIBCMD_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x5C) + +/* VIDC_MMAP_ADDR */ +#define VIDC_CPU_CS_SCIBCMDARG0_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x60) + +/* VIDC_UC_REGION_ADDR */ +#define VIDC_CPU_CS_SCIBARG1_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x64) + +/* VIDC_UC_REGION_ADDR */ +#define VIDC_CPU_CS_SCIBARG2_AR50_LT (VIDC_CPU_CS_BASE_OFFS_AR50_LT + 0x68) + +#define VIDC_CPU_IC_SOFTINT_EN_AR50_LT (VIDC_CPU_IC_BASE_OFFS_AR50_LT + 0x148) +#define VIDC_CPU_IC_SOFTINT_AR50_LT (VIDC_CPU_IC_BASE_OFFS_AR50_LT + 0x150) +#define VIDC_CPU_IC_SOFTINT_H2A_BMSK_AR50_LT 0x8000 +#define VIDC_CPU_IC_SOFTINT_H2A_SHFT_AR50_LT 0x1 + +/* + * -------------------------------------------------------------------------- + * MODULE: vidc_wrapper + * -------------------------------------------------------------------------- + */ +#define VIDC_WRAPPER_BASE_OFFS_AR50_LT 0x000B0000 + +#define VIDC_WRAPPER_HW_VERSION_AR50_LT (VIDC_WRAPPER_BASE_OFFS_AR50_LT + 0x00) +#define VIDC_WRAPPER_HW_VERSION_MAJOR_VERSION_MASK_AR50_LT 0x78000000 +#define VIDC_WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT_AR50_LT 28 +#define VIDC_WRAPPER_HW_VERSION_MINOR_VERSION_MASK_AR50_LT 0xFFF0000 +#define VIDC_WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT_AR50_LT 16 +#define VIDC_WRAPPER_HW_VERSION_STEP_VERSION_MASK_AR50_LT 0xFFFF + +#define VIDC_WRAPPER_CLOCK_CONFIG_AR50_LT (VIDC_WRAPPER_BASE_OFFS_AR50_LT + 0x04) + +#define VIDC_WRAPPER_INTR_STATUS_AR50_LT (VIDC_WRAPPER_BASE_OFFS_AR50_LT + 0x0C) +#define 
VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK_AR50_LT 0x10 +#define VIDC_WRAPPER_INTR_STATUS_A2HWD_SHFT_AR50_LT 0x4 +#define VIDC_WRAPPER_INTR_STATUS_A2H_BMSK_AR50_LT 0x4 +#define VIDC_WRAPPER_INTR_STATUS_A2H_SHFT_AR50_LT 0x2 + +#define VIDC_WRAPPER_INTR_MASK_AR50_LT (VIDC_WRAPPER_BASE_OFFS_AR50_LT + 0x10) +#define VIDC_WRAPPER_INTR_MASK_A2HWD_BMSK_AR50_LT 0x10 +#define VIDC_WRAPPER_INTR_MASK_A2HWD_SHFT_AR50_LT 0x4 +#define VIDC_WRAPPER_INTR_MASK_A2HVCODEC_BMSK_AR50_LT 0x8 +#define VIDC_WRAPPER_INTR_MASK_A2HCPU_BMSK_AR50_LT 0x4 +#define VIDC_WRAPPER_INTR_MASK_A2HCPU_SHFT_AR50_LT 0x2 + +#define VIDC_WRAPPER_INTR_CLEAR_A2HWD_BMSK_AR50_LT 0x10 +#define VIDC_WRAPPER_INTR_CLEAR_A2HWD_SHFT_AR50_LT 0x4 +#define VIDC_WRAPPER_INTR_CLEAR_A2H_BMSK_AR50_LT 0x4 +#define VIDC_WRAPPER_INTR_CLEAR_A2H_SHFT_AR50_LT 0x2 + +/* + * -------------------------------------------------------------------------- + * MODULE: tz_wrapper + * -------------------------------------------------------------------------- + */ +#define VIDC_WRAPPER_TZ_BASE_OFFS 0x000C0000 +#define VIDC_WRAPPER_TZ_CPU_CLOCK_CONFIG (VIDC_WRAPPER_TZ_BASE_OFFS) +#define VIDC_WRAPPER_TZ_CPU_STATUS (VIDC_WRAPPER_TZ_BASE_OFFS + 0x10) + +#define VIDC_CTRL_INIT_AR50_LT VIDC_CPU_CS_SCIACMD_AR50_LT + +#define VIDC_CTRL_STATUS_AR50_LT VIDC_CPU_CS_SCIACMDARG0_AR50_LT +#define VIDC_CTRL_ERROR_STATUS__M_AR50_LT \ + VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK_AR50_LT +#define VIDC_CTRL_INIT_IDLE_MSG_BMSK_AR50_LT \ + VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK_AR50_LT +#define VIDC_CTRL_STATUS_PC_READY_AR50_LT \ + VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY_AR50_LT + +#define VIDC_QTBL_INFO_AR50_LT VIDC_CPU_CS_SCIACMDARG1_AR50_LT +#define VIDC_QTBL_ADDR_AR50_LT VIDC_CPU_CS_SCIACMDARG2_AR50_LT +#define VIDC_VERSION_INFO_AR50_LT VIDC_CPU_CS_SCIACMDARG3_AR50_LT + +#define VIDC_SFR_ADDR_AR50_LT VIDC_CPU_CS_SCIBCMD_AR50_LT +#define VIDC_MMAP_ADDR_AR50_LT VIDC_CPU_CS_SCIBCMDARG0_AR50_LT +#define VIDC_UC_REGION_ADDR_AR50_LT 
VIDC_CPU_CS_SCIBARG1_AR50_LT
#define VIDC_UC_REGION_SIZE_AR50_LT VIDC_CPU_CS_SCIBARG2_AR50_LT

/* Unmask the firmware-to-host (A2H vcodec) interrupt in the wrapper. */
void __interrupt_init_ar50_lt(struct venus_hfi_device *device, u32 sid)
{
	__write_register(device, VIDC_WRAPPER_INTR_MASK_AR50_LT,
			VIDC_WRAPPER_INTR_MASK_A2HVCODEC_BMSK_AR50_LT, sid);
}

/*
 * Program the shared-memory map into the core: UC region base/size,
 * interface queue table address/info, and - only when allocated -
 * the SFR and QDSS buffer addresses.
 */
void __setup_ucregion_memory_map_ar50_lt(struct venus_hfi_device *device,
		u32 sid)
{
	__write_register(device, VIDC_UC_REGION_ADDR_AR50_LT,
			(u32)device->iface_q_table.align_device_addr, sid);
	__write_register(device, VIDC_UC_REGION_SIZE_AR50_LT, SHARED_QSIZE, sid);
	__write_register(device, VIDC_QTBL_ADDR_AR50_LT,
			(u32)device->iface_q_table.align_device_addr, sid);
	__write_register(device, VIDC_QTBL_INFO_AR50_LT, 0x01, sid);
	/* SFR/QDSS are optional; program them only if they were allocated */
	if (device->sfr.align_device_addr)
		__write_register(device, VIDC_SFR_ADDR_AR50_LT,
				(u32)device->sfr.align_device_addr, sid);
	if (device->qdss.align_device_addr)
		__write_register(device, VIDC_MMAP_ADDR_AR50_LT,
				(u32)device->qdss.align_device_addr, sid);
}

/*
 * Power the core down: disable the IRQ (unless a watchdog interrupt is
 * still pending), then turn off clocks and regulators and drop bus votes.
 * No-op when power is already off.
 */
void __power_off_ar50_lt(struct venus_hfi_device *device)
{
	if (!device->power_enabled)
		return;

	/* Keep the IRQ live if a watchdog (A2HWD) interrupt is pending */
	if (!(device->intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK_AR50_LT))
		disable_irq_nosync(device->hal_data->irq);
	device->intr_status = 0;

	__disable_unprepare_clks(device);
	if (__disable_regulators(device))
		d_vpr_e("Failed to disable regulators\n");

	if (__unvote_buses(device, DEFAULT_SID))
		d_vpr_e("Failed to unvote for buses\n");
	device->power_enabled = false;
}

/*
 * Prepare the core for power collapse.
 * Returns 0 once firmware reports PC_READY, -EAGAIN when the core is not
 * in the right state (no WFI / not idle / timeout) and PC should be
 * retried later.
 */
int __prepare_pc_ar50_lt(struct venus_hfi_device *device)
{
	int rc = 0;
	u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
	u32 ctrl_status = 0;
	u32 count = 0, max_tries = 10;

	ctrl_status = __read_register(device, VIDC_CTRL_STATUS_AR50_LT,
			DEFAULT_SID);
	pc_ready = ctrl_status & VIDC_CTRL_STATUS_PC_READY_AR50_LT;
	/* BIT(30) == HFI_CTRL_INIT_IDLE_MSG bit of CTRL_STATUS */
	idle_status = ctrl_status & BIT(30);

	if (pc_ready) {
		d_vpr_l("Already in pc_ready state\n");
		return 0;
	}

	/* BIT(0) of the TZ CPU status reflects ARM WFI */
	wfi_status = BIT(0) & __read_register(device,
			VIDC_WRAPPER_TZ_CPU_STATUS, DEFAULT_SID);
	if (!wfi_status || !idle_status) {
		d_vpr_e("Skipping PC, wfi status not set\n");
		goto skip_power_off;
	}

	rc = __prepare_pc(device);
	if (rc) {
		d_vpr_e("Failed __prepare_pc %d\n", rc);
		goto skip_power_off;
	}

	/* Poll up to 10 times (~150-250us apart) for WFI plus PC_READY */
	while (count < max_tries) {
		wfi_status = BIT(0) & __read_register(device,
				VIDC_WRAPPER_TZ_CPU_STATUS, DEFAULT_SID);
		ctrl_status = __read_register(device,
				VIDC_CTRL_STATUS_AR50_LT, DEFAULT_SID);
		pc_ready = ctrl_status & VIDC_CTRL_STATUS_PC_READY_AR50_LT;
		if (wfi_status && pc_ready)
			break;
		usleep_range(150, 250);
		count++;
	}

	if (count == max_tries) {
		d_vpr_e("Skip PC. Core is not in right state\n");
		goto skip_power_off;
	}

	return rc;

skip_power_off:
	d_vpr_e("Skip PC, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x)\n",
		wfi_status, idle_status, pc_ready, ctrl_status);
	return -EAGAIN;
}

/* Ring the host-to-firmware doorbell (soft interrupt). */
void __raise_interrupt_ar50_lt(struct venus_hfi_device *device, u32 sid)
{
	/*
	 * NOTE(review): this writes the SHFT constant (0x1) directly,
	 * not (1 << SHFT); kept as-is - confirm against the HW docs.
	 */
	__write_register(device, VIDC_CPU_IC_SOFTINT_AR50_LT,
			VIDC_CPU_IC_SOFTINT_H2A_SHFT_AR50_LT, sid);
}

/*
 * Latch and acknowledge the wrapper interrupt: accumulate cause bits in
 * device->intr_status, count real vs. spurious interrupts, then clear
 * the A2H soft interrupt.
 */
void __core_clear_interrupt_ar50_lt(struct venus_hfi_device *device)
{
	u32 intr_status = 0, mask = 0;

	if (!device) {
		d_vpr_e("%s: NULL device\n", __func__);
		return;
	}

	intr_status = __read_register(device, VIDC_WRAPPER_INTR_STATUS_AR50_LT,
			DEFAULT_SID);
	mask = (VIDC_WRAPPER_INTR_STATUS_A2H_BMSK_AR50_LT |
		VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK_AR50_LT |
		VIDC_CTRL_INIT_IDLE_MSG_BMSK_AR50_LT);

	if (intr_status & mask) {
		device->intr_status |= intr_status;
		device->reg_count++;
		d_vpr_l(
			"INTERRUPT for device: %pK: times: %d interrupt_status: %d\n",
			device, device->reg_count, intr_status);
	} else {
		/* None of the expected cause bits set: spurious interrupt */
		device->spur_count++;
	}

	__write_register(device, VIDC_CPU_CS_A2HSOFTINTCLR_AR50_LT, 1,
			DEFAULT_SID);
}

/*
 * Start firmware boot via CTRL_INIT and poll CTRL_STATUS until firmware
 * responds (or ~1000 polls of 50-100us elapse; -ETIME on timeout).
 */
int __boot_firmware_ar50_lt(struct venus_hfi_device *device, u32 sid)
{
	int rc = 0;
	u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 1000;

	
ctrl_init_val = BIT(0); + + __write_register(device, VIDC_CTRL_INIT_AR50_LT, ctrl_init_val, sid); + while (!ctrl_status && count < max_tries) { + ctrl_status = __read_register(device, VIDC_CTRL_STATUS_AR50_LT, sid); + if ((ctrl_status & VIDC_CTRL_ERROR_STATUS__M_AR50_LT) == 0x4) { + s_vpr_e(sid, "invalid setting for UC_REGION\n"); + break; + } + usleep_range(50, 100); + count++; + } + + if (count >= max_tries) { + s_vpr_e(sid, "Error booting up vidc firmware\n"); + rc = -ETIME; + } + + /* Enable interrupt before sending commands to venus */ + __write_register(device, VIDC_CPU_IC_SOFTINT_EN_AR50_LT, 0x1, sid); + return rc; +} diff --git a/techpack/video/msm/vidc/hfi_common.c b/techpack/video/msm/vidc/hfi_common.c new file mode 100644 index 000000000000..74aa73796f39 --- /dev/null +++ b/techpack/video/msm/vidc/hfi_common.c @@ -0,0 +1,4372 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#include "hfi_common.h" + +#define FIRMWARE_SIZE 0X00A00000 +#define REG_ADDR_OFFSET_BITMASK 0x000FFFFF +#define QDSS_IOVA_START 0x80001000 +#define MIN_PAYLOAD_SIZE 3 + +static struct hal_device_data hal_ctxt; +static struct venus_hfi_device venus_hfi_dev; + +struct tzbsp_memprot { + u32 cp_start; + u32 cp_size; + u32 cp_nonpixel_start; + u32 cp_nonpixel_size; +}; + +/* Poll interval in uS */ +#define POLL_INTERVAL_US 50 + +enum tzbsp_video_state { + TZBSP_VIDEO_STATE_SUSPEND = 0, + TZBSP_VIDEO_STATE_RESUME = 1, + TZBSP_VIDEO_STATE_RESTORE_THRESHOLD = 2, +}; + +const struct msm_vidc_bus_data DEFAULT_BUS_VOTE = { + .total_bw_ddr = 0, + .total_bw_llcc = 0, +}; + +/* Less than 50MBps is treated as trivial BW change */ +#define TRIVIAL_BW_THRESHOLD 50000 +#define TRIVIAL_BW_CHANGE(a, b) \ + ((a) > (b) ? 
(a) - (b) < TRIVIAL_BW_THRESHOLD : \ + (b) - (a) < TRIVIAL_BW_THRESHOLD) + +const int max_packets = 480; /* 16 sessions x 30 packets */ + +static void venus_hfi_pm_handler(struct work_struct *work); +static DECLARE_DELAYED_WORK(venus_hfi_pm_work, venus_hfi_pm_handler); +static inline int __resume(struct venus_hfi_device *device, u32 sid); +static inline int __suspend(struct venus_hfi_device *device); +static int __enable_regulators(struct venus_hfi_device *device, u32 sid); +static inline int __prepare_enable_clks( + struct venus_hfi_device *device, u32 sid); +static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet); +static int __initialize_packetization(struct venus_hfi_device *device); +static struct hal_session *__get_session(struct venus_hfi_device *device, + u32 sid); +static bool __is_session_valid(struct venus_hfi_device *device, + struct hal_session *session, const char *func); +static int __set_clocks(struct venus_hfi_device *device, u32 freq, u32 sid); +static int __iface_cmdq_write(struct venus_hfi_device *device, + void *pkt, u32 sid); +static int __load_fw(struct venus_hfi_device *device); +static void __unload_fw(struct venus_hfi_device *device); +static int __tzbsp_set_video_state(enum tzbsp_video_state state, u32 sid); +static int __enable_subcaches(struct venus_hfi_device *device, u32 sid); +static int __set_subcaches(struct venus_hfi_device *device, u32 sid); +static int __release_subcaches(struct venus_hfi_device *device, u32 sid); +static int __disable_subcaches(struct venus_hfi_device *device, u32 sid); +static int __power_collapse(struct venus_hfi_device *device, bool force); +static int venus_hfi_noc_error_info(void *dev); +static int __set_ubwc_config(struct venus_hfi_device *device); +static bool __watchdog_common(u32 intr_status); +static void __noc_error_info_common(struct venus_hfi_device *device); + +struct venus_hfi_vpu_ops ar50_lite_ops = { + .interrupt_init = __interrupt_init_ar50_lt, + .setup_ucregion_memmap = 
__setup_ucregion_memory_map_ar50_lt, + .clock_config_on_enable = NULL, + .reset_ahb2axi_bridge = NULL, + .power_off = __power_off_ar50_lt, + .prepare_pc = __prepare_pc_ar50_lt, + .raise_interrupt = __raise_interrupt_ar50_lt, + .watchdog = __watchdog_common, + .noc_error_info = __noc_error_info_common, + .core_clear_interrupt = __core_clear_interrupt_ar50_lt, + .boot_firmware = __boot_firmware_ar50_lt, +}; + +struct venus_hfi_vpu_ops iris2_ops = { + .interrupt_init = __interrupt_init_iris2, + .setup_ucregion_memmap = __setup_ucregion_memory_map_iris2, + .clock_config_on_enable = NULL, + .reset_ahb2axi_bridge = __reset_ahb2axi_bridge_common, + .power_off = __power_off_iris2, + .prepare_pc = __prepare_pc_iris2, + .raise_interrupt = __raise_interrupt_iris2, + .watchdog = __watchdog_iris2, + .noc_error_info = __noc_error_info_iris2, + .core_clear_interrupt = __core_clear_interrupt_iris2, + .boot_firmware = __boot_firmware_iris2, +}; + +/** + * Utility function to enforce some of our assumptions. Spam calls to this + * in hotspots in code to double check some of the assumptions that we hold. 
 */
/*
 * NOTE(review): the block comment closed above ("enforce some of our
 * assumptions...") appears to describe __strict_check() below rather
 * than __lut() - confirm against the original file.
 */

/*
 * Return the first LUT entry whose frame_size and frame_rate cover the
 * requested resolution/fps; falls back to the last entry when nothing
 * matches (LUT presumably ordered smallest to largest - TODO confirm).
 */
struct lut const *__lut(int width, int height, int fps)
{
	int frame_size = height * width, c = 0;

	do {
		if (LUT[c].frame_size >= frame_size && LUT[c].frame_rate >= fps)
			return &LUT[c];
	} while (++c < ARRAY_SIZE(LUT));

	return &LUT[ARRAY_SIZE(LUT) - 1];
}

/*
 * Return the fixed-point compression ratio stored in @entry for the
 * given bits-per-pixel, or FP_ZERO (with a WARN) when bpp is unknown.
 */
fp_t __compression_ratio(struct lut const *entry, int bpp)
{
	int c = 0;

	for (c = 0; c < COMPRESSION_RATIO_MAX; ++c) {
		if (entry->compression_ratio[c].bpp == bpp)
			return entry->compression_ratio[c].ratio;
	}

	WARN(true, "Shouldn't be here, LUT possibly corrupted?\n");
	return FP_ZERO; /* impossible */
}

/*
 * Pretty-print a table of key/value rows to the debug log.
 * Rows whose val is DUMP_HEADER_MAGIC print only the key (a heading);
 * rows whose format is DUMP_FP_FMT are decoded as fixed-point
 * "int + frac/base"; all other rows use the row's own printf format.
 */
void __dump(struct dump dump[], int len, u32 sid)
{
	int c = 0;

	for (c = 0; c < len; ++c) {
		char format_line[128] = "", formatted_line[128] = "";

		if (dump[c].val == DUMP_HEADER_MAGIC) {
			snprintf(formatted_line, sizeof(formatted_line), "%s\n",
				dump[c].key);
		} else {
			bool fp_format = !strcmp(dump[c].format, DUMP_FP_FMT);

			if (!fp_format) {
				/* Two-step: build the row format, then apply it */
				snprintf(format_line, sizeof(format_line),
					" %-35s: %s\n", dump[c].key,
					dump[c].format);
				snprintf(formatted_line, sizeof(formatted_line),
					format_line, dump[c].val);
			} else {
				size_t integer_part, fractional_part;

				integer_part = fp_int(dump[c].val);
				fractional_part = fp_frac(dump[c].val);
				snprintf(formatted_line, sizeof(formatted_line),
					" %-35s: %zd + %zd/%zd\n",
					dump[c].key, integer_part,
					fractional_part,
					fp_frac_base());
			}
		}
		s_vpr_b(sid, "%s", formatted_line);
	}
}

/* Report a fatal HW error if the caller does not hold device->lock. */
static inline void __strict_check(struct venus_hfi_device *device)
{
	msm_vidc_res_handle_fatal_hw_error(device->res,
		!mutex_is_locked(&device->lock));
}

static inline void __set_state(struct venus_hfi_device *device,
		enum venus_hfi_state state)
{
	device->state = state;
}

/* The core is considered usable in every state except DEINIT. */
static inline bool __core_in_valid_state(struct venus_hfi_device *device)
{
	return device->state != VENUS_STATE_DEINIT;
}

static inline bool is_sys_cache_present(struct venus_hfi_device *device)
{
	return 
device->res->sys_cache_present; +} + +static void __dump_packet(u8 *packet, u32 sid) +{ + u32 c = 0, packet_size = *(u32 *)packet; + const int row_size = 32; + /* + * row must contain enough for 0xdeadbaad * 8 to be converted into + * "de ad ba ab " * 8 + '\0' + */ + char row[3 * 32]; + + for (c = 0; c * row_size < packet_size; ++c) { + int bytes_to_read = ((c + 1) * row_size > packet_size) ? + packet_size % row_size : row_size; + hex_dump_to_buffer(packet + c * row_size, bytes_to_read, + row_size, 4, row, sizeof(row), false); + s_vpr_t(sid, "%s\n", row); + } +} + +static void __sim_modify_cmd_packet(u8 *packet, struct venus_hfi_device *device) +{ + struct hfi_cmd_sys_session_init_packet *sys_init; + struct hal_session *session = NULL; + u8 i; + phys_addr_t fw_bias = 0; + + sys_init = (struct hfi_cmd_sys_session_init_packet *)packet; + if (!device || !sys_init) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, device, sys_init); + return; + } else if (!device->hal_data->firmware_base + || is_iommu_present(device->res)) { + return; + } + fw_bias = device->hal_data->firmware_base; + + session = __get_session(device, sys_init->sid); + if (!session) { + d_vpr_e("%s: Invalid session id\n", __func__); + return; + } + + switch (sys_init->packet_type) { + case HFI_CMD_SESSION_EMPTY_BUFFER: + if (session->is_decoder) { + struct hfi_cmd_session_empty_buffer_compressed_packet + *pkt = (struct + hfi_cmd_session_empty_buffer_compressed_packet + *) packet; + pkt->packet_buffer -= fw_bias; + } else { + struct + hfi_cmd_session_empty_buffer_uncompressed_plane0_packet + *pkt = (struct + hfi_cmd_session_empty_buffer_uncompressed_plane0_packet + *) packet; + pkt->packet_buffer -= fw_bias; + } + break; + case HFI_CMD_SESSION_FILL_BUFFER: + { + struct hfi_cmd_session_fill_buffer_packet *pkt = + (struct hfi_cmd_session_fill_buffer_packet *)packet; + pkt->packet_buffer -= fw_bias; + break; + } + case HFI_CMD_SESSION_SET_BUFFERS: + { + struct hfi_cmd_session_set_buffers_packet *pkt 
= + (struct hfi_cmd_session_set_buffers_packet *)packet; + if (pkt->buffer_type == HFI_BUFFER_OUTPUT || + pkt->buffer_type == HFI_BUFFER_OUTPUT2) { + struct hfi_buffer_info *buff; + + buff = (struct hfi_buffer_info *) pkt->rg_buffer_info; + buff->buffer_addr -= fw_bias; + if (buff->extra_data_addr >= fw_bias) + buff->extra_data_addr -= fw_bias; + } else { + for (i = 0; i < pkt->num_buffers; i++) + pkt->rg_buffer_info[i] -= fw_bias; + } + break; + } + case HFI_CMD_SESSION_RELEASE_BUFFERS: + { + struct hfi_cmd_session_release_buffer_packet *pkt = + (struct hfi_cmd_session_release_buffer_packet *)packet; + + if (pkt->buffer_type == HFI_BUFFER_OUTPUT || + pkt->buffer_type == HFI_BUFFER_OUTPUT2) { + struct hfi_buffer_info *buff; + + buff = (struct hfi_buffer_info *) pkt->rg_buffer_info; + buff->buffer_addr -= fw_bias; + buff->extra_data_addr -= fw_bias; + } else { + for (i = 0; i < pkt->num_buffers; i++) + pkt->rg_buffer_info[i] -= fw_bias; + } + break; + } + default: + break; + } +} + +static int __session_pause(struct venus_hfi_device *device, + struct hal_session *session) +{ + int rc = 0; + + if (!__is_session_valid(device, session, __func__)) + return -EINVAL; + + /* ignore if session paused already */ + if (session->flags & SESSION_PAUSE) + return 0; + + session->flags |= SESSION_PAUSE; + s_vpr_h(session->sid, "%s: session paused\n", __func__); + + return rc; +} + +static int __session_resume(struct venus_hfi_device *device, + struct hal_session *session) +{ + int rc = 0; + + if (!__is_session_valid(device, session, __func__)) + return -EINVAL; + + /* ignore if session already resumed */ + if (!(session->flags & SESSION_PAUSE)) + return 0; + + session->flags &= ~SESSION_PAUSE; + s_vpr_h(session->sid, "%s: session resumed\n", __func__); + + rc = __resume(device, session->sid); + if (rc) { + s_vpr_e(session->sid, "%s: resume failed\n", __func__); + goto exit; + } + +exit: + return rc; +} + +static int venus_hfi_session_pause(void *sess) +{ + int rc; + struct 
hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + mutex_lock(&device->lock); + rc = __session_pause(device, session); + mutex_unlock(&device->lock); + + return rc; +} + +static int venus_hfi_session_resume(void *sess) +{ + int rc; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + mutex_lock(&device->lock); + rc = __session_resume(device, session); + mutex_unlock(&device->lock); + + return rc; +} + +static int __acquire_regulator(struct regulator_info *rinfo, + struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + + if (rinfo->has_hw_power_collapse) { + rc = regulator_set_mode(rinfo->regulator, + REGULATOR_MODE_NORMAL); + if (rc) { + /* + * This is somewhat fatal, but nothing we can do + * about it. We can't disable the regulator w/o + * getting it back under s/w control + */ + s_vpr_e(sid, + "Failed to acquire regulator control: %s\n", + rinfo->name); + } else { + + s_vpr_h(sid, "Acquire regulator control from HW: %s\n", + rinfo->name); + + } + } + + if (!regulator_is_enabled(rinfo->regulator)) { + s_vpr_e(sid, "Regulator is not enabled %s\n", + rinfo->name); + msm_vidc_res_handle_fatal_hw_error(device->res, true); + } + + return rc; +} + +static int __hand_off_regulator(struct regulator_info *rinfo, u32 sid) +{ + int rc = 0; + + if (rinfo->has_hw_power_collapse) { + rc = regulator_set_mode(rinfo->regulator, + REGULATOR_MODE_FAST); + if (rc) { + s_vpr_e(sid, + "Failed to hand off regulator control: %s\n", + rinfo->name); + } else { + s_vpr_h(sid, "Hand off regulator control to HW: %s\n", + rinfo->name); + } + } + + return rc; +} + +static int __hand_off_regulators(struct venus_hfi_device *device, u32 sid) +{ + struct regulator_info *rinfo; + int rc = 0, c = 0; + + venus_hfi_for_each_regulator(device, rinfo) { + rc = __hand_off_regulator(rinfo, sid); + /* + * If one regulator hand off failed, driver should take + * the control for other regulators back. 
+ */ + if (rc) + goto err_reg_handoff_failed; + c++; + } + + return rc; +err_reg_handoff_failed: + venus_hfi_for_each_regulator_reverse_continue(device, rinfo, c) + __acquire_regulator(rinfo, device, sid); + + return rc; +} + +static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet, + bool *rx_req_is_set, u32 sid) +{ + struct hfi_queue_header *queue; + u32 packet_size_in_words, new_write_idx; + u32 empty_space, read_idx, write_idx; + u32 *write_ptr; + + if (!qinfo || !packet) { + s_vpr_e(sid, "%s: invalid params %pK %pK\n", + __func__, qinfo, packet); + return -EINVAL; + } else if (!qinfo->q_array.align_virtual_addr) { + s_vpr_e(sid, "Queues have already been freed\n"); + return -EINVAL; + } + + queue = (struct hfi_queue_header *) qinfo->q_hdr; + if (!queue) { + s_vpr_e(sid, "queue not present\n"); + return -ENOENT; + } + + if (msm_vidc_debug & VIDC_PKT) { + s_vpr_t(sid, "%s: %pK\n", __func__, qinfo); + __dump_packet(packet, sid); + } + + packet_size_in_words = (*(u32 *)packet) >> 2; + if (!packet_size_in_words || packet_size_in_words > + qinfo->q_array.mem_size>>2) { + s_vpr_e(sid, "Invalid packet size\n"); + return -ENODATA; + } + + read_idx = queue->qhdr_read_idx; + write_idx = queue->qhdr_write_idx; + + empty_space = (write_idx >= read_idx) ? 
+ ((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) : + (read_idx - write_idx); + if (empty_space <= packet_size_in_words) { + queue->qhdr_tx_req = 1; + s_vpr_e(sid, "Insufficient size (%d) to write (%d)\n", + empty_space, packet_size_in_words); + return -ENOTEMPTY; + } + + queue->qhdr_tx_req = 0; + + new_write_idx = write_idx + packet_size_in_words; + write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) + + (write_idx << 2)); + if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr || + write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr + + qinfo->q_array.mem_size)) { + s_vpr_e(sid, "Invalid write index"); + return -ENODATA; + } + + if (new_write_idx < (qinfo->q_array.mem_size >> 2)) { + memcpy(write_ptr, packet, packet_size_in_words << 2); + } else { + new_write_idx -= qinfo->q_array.mem_size >> 2; + memcpy(write_ptr, packet, (packet_size_in_words - + new_write_idx) << 2); + memcpy((void *)qinfo->q_array.align_virtual_addr, + packet + ((packet_size_in_words - new_write_idx) << 2), + new_write_idx << 2); + } + + /* + * Memory barrier to make sure packet is written before updating the + * write index + */ + mb(); + queue->qhdr_write_idx = new_write_idx; + if (rx_req_is_set) + *rx_req_is_set = queue->qhdr_rx_req == 1; + /* + * Memory barrier to make sure write index is updated before an + * interrupt is raised on venus. 
+ */ + mb(); + return 0; +} + +static void __hal_sim_modify_msg_packet(u8 *packet, + struct venus_hfi_device *device) +{ + struct hfi_msg_sys_session_init_done_packet *init_done; + struct hal_session *session = NULL; + phys_addr_t fw_bias = 0; + + if (!device || !packet) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, device, packet); + return; + } else if (!device->hal_data->firmware_base + || is_iommu_present(device->res)) { + return; + } + + fw_bias = device->hal_data->firmware_base; + init_done = (struct hfi_msg_sys_session_init_done_packet *)packet; + session = __get_session(device, init_done->sid); + if (!session) { + d_vpr_e("%s: Invalid session id: %x\n", + __func__, init_done->sid); + return; + } + + switch (init_done->packet_type) { + case HFI_MSG_SESSION_FILL_BUFFER_DONE: + if (session->is_decoder) { + struct + hfi_msg_session_fbd_uncompressed_plane0_packet + *pkt_uc = (struct + hfi_msg_session_fbd_uncompressed_plane0_packet + *) packet; + pkt_uc->packet_buffer += fw_bias; + } else { + struct + hfi_msg_session_fill_buffer_done_compressed_packet + *pkt = (struct + hfi_msg_session_fill_buffer_done_compressed_packet + *) packet; + pkt->packet_buffer += fw_bias; + } + break; + case HFI_MSG_SESSION_EMPTY_BUFFER_DONE: + { + struct hfi_msg_session_empty_buffer_done_packet *pkt = + (struct hfi_msg_session_empty_buffer_done_packet *)packet; + pkt->packet_buffer += fw_bias; + break; + } + default: + break; + } +} + +static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet, + u32 *pb_tx_req_is_set) +{ + struct hfi_queue_header *queue; + u32 packet_size_in_words, new_read_idx; + u32 *read_ptr; + u32 receive_request = 0; + u32 read_idx, write_idx; + int rc = 0; + u32 sid; + + if (!qinfo || !packet || !pb_tx_req_is_set) { + d_vpr_e("%s: invalid params %pK %pK %pK\n", + __func__, qinfo, packet, pb_tx_req_is_set); + return -EINVAL; + } else if (!qinfo->q_array.align_virtual_addr) { + d_vpr_e("Queues have already been freed\n"); + return -EINVAL; + } + 
+ /* + * Memory barrier to make sure data is valid before + *reading it + */ + mb(); + queue = (struct hfi_queue_header *) qinfo->q_hdr; + + if (!queue) { + d_vpr_e("Queue memory is not allocated\n"); + return -ENOMEM; + } + + /* + * Do not set receive request for debug queue, if set, + * Venus generates interrupt for debug messages even + * when there is no response message available. + * In general debug queue will not become full as it + * is being emptied out for every interrupt from Venus. + * Venus will anyway generates interrupt if it is full. + */ + if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q) + receive_request = 1; + + read_idx = queue->qhdr_read_idx; + write_idx = queue->qhdr_write_idx; + + if (read_idx == write_idx) { + queue->qhdr_rx_req = receive_request; + /* + * mb() to ensure qhdr is updated in main memory + * so that venus reads the updated header values + */ + mb(); + *pb_tx_req_is_set = 0; + d_vpr_l( + "%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n", + receive_request ? 
"message" : "debug", + queue->qhdr_rx_req, queue->qhdr_tx_req, + queue->qhdr_read_idx); + return -ENODATA; + } + + read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) + + (read_idx << 2)); + if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr || + read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr + + qinfo->q_array.mem_size - sizeof(*read_ptr))) { + d_vpr_e("Invalid read index\n"); + return -ENODATA; + } + + packet_size_in_words = (*read_ptr) >> 2; + if (!packet_size_in_words) { + d_vpr_e("Zero packet size\n"); + return -ENODATA; + } + + new_read_idx = read_idx + packet_size_in_words; + if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) && + read_idx <= (qinfo->q_array.mem_size >> 2)) { + if (new_read_idx < (qinfo->q_array.mem_size >> 2)) { + memcpy(packet, read_ptr, + packet_size_in_words << 2); + } else { + new_read_idx -= (qinfo->q_array.mem_size >> 2); + memcpy(packet, read_ptr, + (packet_size_in_words - new_read_idx) << 2); + memcpy(packet + ((packet_size_in_words - + new_read_idx) << 2), + (u8 *)qinfo->q_array.align_virtual_addr, + new_read_idx << 2); + } + } else { + d_vpr_e("BAD packet received, read_idx: %#x, pkt_size: %d\n", + read_idx, packet_size_in_words << 2); + d_vpr_e("Dropping this packet\n"); + new_read_idx = write_idx; + rc = -ENODATA; + } + + if (new_read_idx != write_idx) + queue->qhdr_rx_req = 0; + else + queue->qhdr_rx_req = receive_request; + + queue->qhdr_read_idx = new_read_idx; + /* + * mb() to ensure qhdr is updated in main memory + * so that venus reads the updated header values + */ + mb(); + + *pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 
1 : 0; + + if ((msm_vidc_debug & VIDC_PKT) && + !(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) { + sid = *((u32 *)packet + 2); + s_vpr_t(sid, "%s: %pK\n", __func__, qinfo); + __dump_packet(packet, sid); + } + + return rc; +} + +static int __smem_alloc(struct venus_hfi_device *dev, + struct vidc_mem_addr *mem, u32 size, u32 align, + u32 flags, u32 usage) +{ + struct msm_smem *alloc = &mem->mem_data; + int rc = 0; + + if (!dev || !mem || !size) { + d_vpr_e("%s: invalid params %pK %pK %pK\n", + __func__, dev, mem, size); + return -EINVAL; + } + + d_vpr_h("start to alloc size: %d, flags: %d\n", size, flags); + rc = msm_smem_alloc( + size, align, flags, usage, 1, (void *)dev->res, + MSM_VIDC_UNKNOWN, alloc, DEFAULT_SID); + if (rc) { + d_vpr_e("%s: alloc failed\n", __func__); + rc = -ENOMEM; + goto fail_smem_alloc; + } + + d_vpr_h("%s: ptr = %pK, size = %d\n", __func__, + alloc->kvaddr, size); + + mem->mem_size = alloc->size; + mem->align_virtual_addr = alloc->kvaddr; + mem->align_device_addr = alloc->device_addr; + + return rc; +fail_smem_alloc: + return rc; +} + +static void __smem_free(struct venus_hfi_device *dev, struct msm_smem *mem) +{ + if (!dev || !mem) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, dev, mem); + return; + } + + msm_smem_free(mem, DEFAULT_SID); +} + +void __write_register(struct venus_hfi_device *device, + u32 reg, u32 value, u32 sid) +{ + u32 hwiosymaddr = reg; + u8 *base_addr; + + if (!device) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return; + } + + __strict_check(device); + + if (!device->power_enabled) { + s_vpr_e(sid, "HFI Write register failed : Power is OFF\n"); + msm_vidc_res_handle_fatal_hw_error(device->res, true); + return; + } + + base_addr = device->hal_data->register_base; + s_vpr_l(sid, "Base addr: %pK, writing to: %#x, Value: %#x...\n", + base_addr, hwiosymaddr, value); + base_addr += hwiosymaddr; + writel_relaxed(value, base_addr); + + /* + * Memory barrier to make sure value is written into the 
register. + */ + wmb(); +} + +/* + * Argument mask is used to specify which bits to update. In case mask is 0x11, + * only bits 0 & 4 will be updated with corresponding bits from value. To update + * entire register with value, set mask = 0xFFFFFFFF. + */ +void __write_register_masked(struct venus_hfi_device *device, + u32 reg, u32 value, u32 mask, u32 sid) +{ + u32 prev_val, new_val; + u8 *base_addr; + + if (!device) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return; + } + + __strict_check(device); + + if (!device->power_enabled) { + s_vpr_e(sid, "%s: register write failed, power is off\n", + __func__); + msm_vidc_res_handle_fatal_hw_error(device->res, true); + return; + } + + base_addr = device->hal_data->register_base; + base_addr += reg; + + prev_val = readl_relaxed(base_addr); + /* + * Memory barrier to ensure register read is correct + */ + rmb(); + + new_val = (prev_val & ~mask) | (value & mask); + s_vpr_l(sid, + "Base addr: %pK, writing to: %#x, previous-value: %#x, value: %#x, mask: %#x, new-value: %#x...\n", + base_addr, reg, prev_val, value, mask, new_val); + writel_relaxed(new_val, base_addr); + /* + * Memory barrier to make sure value is written into the register. + */ + wmb(); +} + +int __read_register(struct venus_hfi_device *device, u32 reg, u32 sid) +{ + int rc = 0; + u8 *base_addr; + + if (!device) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return -EINVAL; + } + + __strict_check(device); + + if (!device->power_enabled) { + s_vpr_e(sid, "HFI Read register failed : Power is OFF\n"); + msm_vidc_res_handle_fatal_hw_error(device->res, true); + return -EINVAL; + } + + base_addr = device->hal_data->register_base; + + rc = readl_relaxed(base_addr + reg); + /* + * Memory barrier to make sure value is read correctly from the + * register. 
+ */ + rmb(); + s_vpr_l(sid, "Base addr: %pK, read from: %#x, value: %#x...\n", + base_addr, reg, rc); + + return rc; +} + +static void __set_registers(struct venus_hfi_device *device, u32 sid) +{ + struct reg_set *reg_set; + int i; + + if (!device->res) { + s_vpr_e(sid, "device resources null, cannot set registers\n"); + return; + } + + reg_set = &device->res->reg_set; + for (i = 0; i < reg_set->count; i++) { + __write_register_masked(device, reg_set->reg_tbl[i].reg, + reg_set->reg_tbl[i].value, + reg_set->reg_tbl[i].mask, sid); + } +} + +static int __vote_bandwidth(struct bus_info *bus, unsigned long ab_kbps, + unsigned long ib_kbps, u32 sid) +{ + int rc = 0; + s_vpr_p(sid, "Voting bus %s to ab %llu ib %llu kbps\n", bus->name, ab_kbps, ib_kbps); + rc = icc_set_bw(bus->path, ab_kbps, ib_kbps); + if (rc) + s_vpr_e(sid, "Failed voting bus %s to ab %llu ib %llu, rc=%d\n", + bus->name, ab_kbps, ib_kbps, rc); + + return rc; +} + +int __unvote_buses(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + struct bus_info *bus = NULL; + + device->bus_vote = DEFAULT_BUS_VOTE; + + venus_hfi_for_each_bus(device, bus) { + rc = __vote_bandwidth(bus, 0, 0, sid); + if (rc) + goto err_unknown_device; + } + +err_unknown_device: + return rc; +} + +static int __vote_buses(struct venus_hfi_device *device, + unsigned long bw_ddr, unsigned long bw_llcc, u32 sid) +{ + int rc = 0; + struct bus_info *bus = NULL; + unsigned long ab_kbps = 0, ib_kbps = 0, bw_prev = 0; + enum vidc_bus_type type; + + venus_hfi_for_each_bus(device, bus) { + if (bus && bus->path) { + type = get_type_frm_name(bus->name); + + if (type == DDR) { + ab_kbps = bw_ddr; + bw_prev = device->bus_vote.total_bw_ddr; + } else if (type == LLCC) { + ab_kbps = bw_llcc; + bw_prev = device->bus_vote.total_bw_llcc; + } else { + ab_kbps = bus->range[1]; + bw_prev = device->bus_vote.total_bw_ddr ? 
+ ab_kbps : 0; + } + + /* ensure freq is within limits */ + ab_kbps = clamp_t(typeof(ab_kbps), ab_kbps, + bus->range[0], bus->range[1]); + + if (TRIVIAL_BW_CHANGE(ab_kbps, bw_prev) && bw_prev) { + s_vpr_l(sid, "Skip voting bus %s to %llu bps", + bus->name, ab_kbps * 1000); + continue; + } + + if (device->res->vpu_ver == VPU_VERSION_AR50_LITE) + ib_kbps = 2 * ab_kbps; + + rc = __vote_bandwidth(bus, ab_kbps, ib_kbps, sid); + + if (type == DDR) + device->bus_vote.total_bw_ddr = ab_kbps; + else if (type == LLCC) + device->bus_vote.total_bw_llcc = ab_kbps; + } else { + s_vpr_e(sid, "No BUS to Vote\n"); + } + } + + return rc; +} + +static int venus_hfi_vote_buses(void *dev, unsigned long bw_ddr, + unsigned long bw_llcc, u32 sid) +{ + int rc = 0; + struct venus_hfi_device *device = dev; + + if (!device) + return -EINVAL; + + mutex_lock(&device->lock); + rc = __vote_buses(device, bw_ddr, bw_llcc, sid); + mutex_unlock(&device->lock); + + return rc; +} +static int __core_set_resource(struct venus_hfi_device *device, + struct vidc_resource_hdr *resource_hdr, void *resource_value) +{ + struct hfi_cmd_sys_set_resource_packet *pkt; + u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; + int rc = 0; + + if (!device || !resource_hdr || !resource_value) { + d_vpr_e("%s: invalid params %pK %pK %pK\n", __func__, + device, resource_hdr, resource_value); + return -EINVAL; + } + + pkt = (struct hfi_cmd_sys_set_resource_packet *) packet; + + rc = call_hfi_pkt_op(device, sys_set_resource, + pkt, resource_hdr, resource_value); + if (rc) { + d_vpr_e("set_res: failed to create packet\n"); + goto err_create_pkt; + } + + rc = __iface_cmdq_write(device, pkt, DEFAULT_SID); + if (rc) + rc = -ENOTEMPTY; + +err_create_pkt: + return rc; +} + +static int __core_release_resource(struct venus_hfi_device *device, + struct vidc_resource_hdr *resource_hdr) +{ + struct hfi_cmd_sys_release_resource_packet *pkt; + u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; + int rc = 0; + + if (!device || !resource_hdr) { + 
d_vpr_e("%s: invalid params %pK %pK\n", + __func__, device, resource_hdr); + return -EINVAL; + } + + pkt = (struct hfi_cmd_sys_release_resource_packet *) packet; + + rc = call_hfi_pkt_op(device, sys_release_resource, + pkt, resource_hdr); + + if (rc) { + d_vpr_e("release_res: failed to create packet\n"); + goto err_create_pkt; + } + + rc = __iface_cmdq_write(device, pkt, DEFAULT_SID); + if (rc) + rc = -ENOTEMPTY; + +err_create_pkt: + return rc; +} + +static int __tzbsp_set_video_state(enum tzbsp_video_state state, u32 sid) +{ + int tzbsp_rsp = qcom_scm_set_remote_state(state, 0); + + s_vpr_l(sid, "Set state %d, resp %d\n", state, tzbsp_rsp); + if (tzbsp_rsp) { + s_vpr_e(sid, "Failed to set video core state to suspend: %d\n", + tzbsp_rsp); + return -EINVAL; + } + + return 0; +} + +static int venus_hfi_suspend(void *dev) +{ + int rc = 0; + struct venus_hfi_device *device = (struct venus_hfi_device *) dev; + + if (!device) { + d_vpr_e("%s: invalid device\n", __func__); + return -EINVAL; + } else if (!device->res->sw_power_collapsible) { + return -ENOTSUPP; + } + + d_vpr_h("Suspending Venus\n"); + mutex_lock(&device->lock); + rc = __power_collapse(device, true); + if (rc) { + d_vpr_e("%s: Venus is busy\n", __func__); + rc = -EBUSY; + } + mutex_unlock(&device->lock); + + /* Cancel pending delayed works if any */ + if (!rc) + cancel_delayed_work(&venus_hfi_pm_work); + + return rc; +} + +static int venus_hfi_flush_debug_queue(void *dev) +{ + int rc = 0; + struct venus_hfi_device *device = (struct venus_hfi_device *) dev; + + if (!device) { + d_vpr_e("%s: invalid device\n", __func__); + return -EINVAL; + } + + mutex_lock(&device->lock); + if (!device->power_enabled) { + d_vpr_e("%s: venus power off\n", __func__); + rc = -EINVAL; + goto exit; + } + __flush_debug_queue(device, NULL); +exit: + mutex_unlock(&device->lock); + return rc; +} + +static int __set_clk_rate(struct venus_hfi_device *device, + struct clock_info *cl, u64 rate, u32 sid) +{ + int rc = 0; + u64 
threshold_freq = device->res->clk_freq_threshold; + struct cx_ipeak_client *ipeak = device->res->cx_ipeak_context; + struct clk *clk = cl->clk; + + if (ipeak && device->clk_freq < threshold_freq && rate >= threshold_freq) { + rc = cx_ipeak_update(ipeak, true); + if (rc) { + s_vpr_e(sid, "%s: cx_ipeak_update failed!\n", __func__); + return rc; + } + s_vpr_p(sid, + "cx_ipeak_update: up, clk freq = %lu rate = %lu threshold_freq = %lu\n", + device->clk_freq, rate, threshold_freq); + } + + rc = clk_set_rate(clk, rate); + if (rc) { + s_vpr_e(sid, + "%s: Failed to set clock rate %llu %s: %d\n", + __func__, rate, cl->name, rc); + return rc; + } + + if (ipeak && device->clk_freq >= threshold_freq && rate < threshold_freq) { + rc = cx_ipeak_update(ipeak, false); + if (rc) { + s_vpr_e(sid, + "cx_ipeak_update failed! ipeak %pK\n", ipeak); + device->clk_freq = rate; + return rc; + } + s_vpr_p(sid, + "cx_ipeak_update: up, clk freq = %lu rate = %lu threshold_freq = %lu\n", + device->clk_freq, rate, threshold_freq); + } + + device->clk_freq = rate; + + return rc; +} + +static int __set_clocks(struct venus_hfi_device *device, u32 freq, u32 sid) +{ + struct clock_info *cl; + int rc = 0; + + /* bail early if requested clk_freq is not changed */ + if (freq == device->clk_freq) + return 0; + + venus_hfi_for_each_clock(device, cl) { + if (cl->has_scaling) {/* has_scaling */ + rc = __set_clk_rate(device, cl, freq, sid); + if (rc) + return rc; + + trace_msm_vidc_perf_clock_scale(cl->name, freq); + s_vpr_p(sid, "Scaling clock %s to %u\n", + cl->name, freq); + } + } + + return 0; +} + +static int venus_hfi_scale_clocks(void *dev, u32 freq, u32 sid) +{ + int rc = 0; + struct venus_hfi_device *device = dev; + + if (!device) { + s_vpr_e(sid, "Invalid args: %pK\n", device); + return -EINVAL; + } + + mutex_lock(&device->lock); + + if (__resume(device, sid)) { + s_vpr_e(sid, "Resume from power collapse failed\n"); + rc = -ENODEV; + goto exit; + } + + rc = __set_clocks(device, freq, sid); +exit: + 
mutex_unlock(&device->lock); + + return rc; +} + +static int __scale_clocks(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + u32 rate = 0; + + allowed_clks_tbl = device->res->allowed_clks_tbl; + rate = device->clk_freq ? device->clk_freq : + allowed_clks_tbl[0].clock_rate; + + rc = __set_clocks(device, rate, sid); + return rc; +} + +/* Writes into cmdq without raising an interrupt */ +static int __iface_cmdq_write_relaxed(struct venus_hfi_device *device, + void *pkt, bool *requires_interrupt, u32 sid) +{ + struct vidc_iface_q_info *q_info; + struct vidc_hal_cmd_pkt_hdr *cmd_packet; + int result = -E2BIG; + + if (!device || !pkt) { + s_vpr_e(sid, "%s: invalid params %pK %pK\n", + __func__, device, pkt); + return -EINVAL; + } + + __strict_check(device); + + if (!__core_in_valid_state(device)) { + s_vpr_e(sid, "%s: fw not in init state\n", __func__); + result = -EINVAL; + goto err_q_null; + } + + cmd_packet = (struct vidc_hal_cmd_pkt_hdr *)pkt; + device->last_packet_type = cmd_packet->packet_type; + + q_info = &device->iface_queues[VIDC_IFACEQ_CMDQ_IDX]; + if (!q_info) { + s_vpr_e(sid, "cannot write to shared Q's\n"); + goto err_q_null; + } + + if (!q_info->q_array.align_virtual_addr) { + s_vpr_e(sid, "cannot write to shared CMD Q's\n"); + result = -ENODATA; + goto err_q_null; + } + + __sim_modify_cmd_packet((u8 *)pkt, device); + if (__resume(device, sid)) { + s_vpr_e(sid, "%s: Power on failed\n", __func__); + goto err_q_write; + } + + if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt, sid)) { + if (device->res->sw_power_collapsible) { + cancel_delayed_work(&venus_hfi_pm_work); + if (!queue_delayed_work(device->venus_pm_workq, + &venus_hfi_pm_work, + msecs_to_jiffies( + device->res->msm_vidc_pwr_collapse_delay))) { + s_vpr_l(sid, "PM work already scheduled\n"); + } + } + + result = 0; + } else { + s_vpr_e(sid, "__iface_cmdq_write: queue full\n"); + } + +err_q_write: +err_q_null: + return 
result; +} + +static int __iface_cmdq_write(struct venus_hfi_device *device, + void *pkt, u32 sid) +{ + bool needs_interrupt = false; + int rc = __iface_cmdq_write_relaxed(device, pkt, &needs_interrupt, sid); + + if (!rc && needs_interrupt) + call_venus_op(device, raise_interrupt, device, sid); + + return rc; +} + +static int __iface_msgq_read(struct venus_hfi_device *device, void *pkt) +{ + u32 tx_req_is_set = 0; + int rc = 0; + struct vidc_iface_q_info *q_info; + + if (!pkt) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + __strict_check(device); + + if (!__core_in_valid_state(device)) { + d_vpr_e("%s: fw not in init state\n", __func__); + rc = -EINVAL; + goto read_error_null; + } + + q_info = &device->iface_queues[VIDC_IFACEQ_MSGQ_IDX]; + if (!q_info->q_array.align_virtual_addr) { + d_vpr_e("cannot read from shared MSG Q's\n"); + rc = -ENODATA; + goto read_error_null; + } + + if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) { + __hal_sim_modify_msg_packet((u8 *)pkt, device); + if (tx_req_is_set) + call_venus_op(device, raise_interrupt, device, + DEFAULT_SID); + rc = 0; + } else + rc = -ENODATA; + +read_error_null: + return rc; +} + +static int __iface_dbgq_read(struct venus_hfi_device *device, void *pkt) +{ + u32 tx_req_is_set = 0; + int rc = 0; + struct vidc_iface_q_info *q_info; + + if (!pkt) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + __strict_check(device); + + q_info = &device->iface_queues[VIDC_IFACEQ_DBGQ_IDX]; + if (!q_info->q_array.align_virtual_addr) { + d_vpr_e("cannot read from shared DBG Q's\n"); + rc = -ENODATA; + goto dbg_error_null; + } + + if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) { + if (tx_req_is_set) + call_venus_op(device, raise_interrupt, device, + DEFAULT_SID); + rc = 0; + } else + rc = -ENODATA; + +dbg_error_null: + return rc; +} + +static void __set_queue_hdr_defaults(struct hfi_queue_header *q_hdr) +{ + q_hdr->qhdr_status = 0x1; + q_hdr->qhdr_type = 
VIDC_IFACEQ_DFLT_QHDR; + q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4; + q_hdr->qhdr_pkt_size = 0; + q_hdr->qhdr_rx_wm = 0x1; + q_hdr->qhdr_tx_wm = 0x1; + q_hdr->qhdr_rx_req = 0x1; + q_hdr->qhdr_tx_req = 0x0; + q_hdr->qhdr_rx_irq_status = 0x0; + q_hdr->qhdr_tx_irq_status = 0x0; + q_hdr->qhdr_read_idx = 0x0; + q_hdr->qhdr_write_idx = 0x0; +} + +static void __interface_queues_release(struct venus_hfi_device *device) +{ + int i; + struct hfi_mem_map_table *qdss; + struct hfi_mem_map *mem_map; + int num_entries = device->res->qdss_addr_set.count; + unsigned long mem_map_table_base_addr; + struct context_bank_info *cb; + + if (device->qdss.align_virtual_addr) { + qdss = (struct hfi_mem_map_table *) + device->qdss.align_virtual_addr; + qdss->mem_map_num_entries = num_entries; + mem_map_table_base_addr = + device->qdss.align_device_addr + + sizeof(struct hfi_mem_map_table); + qdss->mem_map_table_base_addr = + (u32)mem_map_table_base_addr; + if ((unsigned long)qdss->mem_map_table_base_addr != + mem_map_table_base_addr) { + d_vpr_e("Invalid mem_map_table_base_addr %#lx", + mem_map_table_base_addr); + } + + mem_map = (struct hfi_mem_map *)(qdss + 1); + cb = msm_smem_get_context_bank(MSM_VIDC_UNKNOWN, + false, device->res, HAL_BUFFER_INTERNAL_CMD_QUEUE, + DEFAULT_SID); + + for (i = 0; cb && i < num_entries; i++) { + iommu_unmap(cb->domain, + mem_map[i].virtual_addr, + mem_map[i].size); + } + + __smem_free(device, &device->qdss.mem_data); + } + + __smem_free(device, &device->iface_q_table.mem_data); + __smem_free(device, &device->sfr.mem_data); + + for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) { + device->iface_queues[i].q_hdr = NULL; + device->iface_queues[i].q_array.align_virtual_addr = NULL; + device->iface_queues[i].q_array.align_device_addr = 0; + } + + device->iface_q_table.align_virtual_addr = NULL; + device->iface_q_table.align_device_addr = 0; + + device->qdss.align_virtual_addr = NULL; + device->qdss.align_device_addr = 0; + + device->sfr.align_virtual_addr = NULL; + 
device->sfr.align_device_addr = 0; + + device->mem_addr.align_virtual_addr = NULL; + device->mem_addr.align_device_addr = 0; + +} + +static int __get_qdss_iommu_virtual_addr(struct venus_hfi_device *dev, + struct hfi_mem_map *mem_map, struct iommu_domain *domain) +{ + int i; + int rc = 0; + dma_addr_t iova = QDSS_IOVA_START; + int num_entries = dev->res->qdss_addr_set.count; + struct addr_range *qdss_addr_tbl = dev->res->qdss_addr_set.addr_tbl; + + if (!num_entries) + return -ENODATA; + + for (i = 0; i < num_entries; i++) { + if (domain) { + rc = iommu_map(domain, iova, + qdss_addr_tbl[i].start, + qdss_addr_tbl[i].size, + IOMMU_READ | IOMMU_WRITE); + + if (rc) { + d_vpr_e( + "IOMMU QDSS mapping failed for addr %#x\n", + qdss_addr_tbl[i].start); + rc = -ENOMEM; + break; + } + } else { + iova = qdss_addr_tbl[i].start; + } + + mem_map[i].virtual_addr = (u32)iova; + mem_map[i].physical_addr = qdss_addr_tbl[i].start; + mem_map[i].size = qdss_addr_tbl[i].size; + mem_map[i].attr = 0x0; + + iova += mem_map[i].size; + } + + if (i < num_entries) { + d_vpr_e("QDSS mapping failed, Freeing other entries %d\n", i); + + for (--i; domain && i >= 0; i--) { + iommu_unmap(domain, + mem_map[i].virtual_addr, + mem_map[i].size); + } + } + + return rc; +} + +static int __interface_queues_init(struct venus_hfi_device *dev) +{ + struct hfi_queue_table_header *q_tbl_hdr; + struct hfi_queue_header *q_hdr; + u32 i; + int rc = 0; + struct hfi_mem_map_table *qdss; + struct hfi_mem_map *mem_map; + struct vidc_iface_q_info *iface_q; + struct hfi_sfr_struct *vsfr; + struct vidc_mem_addr *mem_addr; + int offset = 0; + int num_entries = dev->res->qdss_addr_set.count; + phys_addr_t fw_bias = 0; + size_t q_size; + unsigned long mem_map_table_base_addr; + struct context_bank_info *cb; + + q_size = SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE; + mem_addr = &dev->mem_addr; + if (!is_iommu_present(dev->res)) + fw_bias = dev->hal_data->firmware_base; + rc = __smem_alloc(dev, mem_addr, q_size, 1, 
SMEM_UNCACHED, + HAL_BUFFER_INTERNAL_CMD_QUEUE); + if (rc) { + d_vpr_e("iface_q_table_alloc_fail\n"); + goto fail_alloc_queue; + } + + dev->iface_q_table.align_virtual_addr = mem_addr->align_virtual_addr; + dev->iface_q_table.align_device_addr = mem_addr->align_device_addr - + fw_bias; + dev->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE; + dev->iface_q_table.mem_data = mem_addr->mem_data; + offset += dev->iface_q_table.mem_size; + + for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) { + iface_q = &dev->iface_queues[i]; + iface_q->q_array.align_device_addr = mem_addr->align_device_addr + + offset - fw_bias; + iface_q->q_array.align_virtual_addr = + mem_addr->align_virtual_addr + offset; + iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE; + offset += iface_q->q_array.mem_size; + iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR( + dev->iface_q_table.align_virtual_addr, i); + __set_queue_hdr_defaults(iface_q->q_hdr); + } + + if ((msm_vidc_fw_debug_mode & HFI_DEBUG_MODE_QDSS) && num_entries) { + rc = __smem_alloc(dev, mem_addr, + ALIGNED_QDSS_SIZE, 1, SMEM_UNCACHED, + HAL_BUFFER_INTERNAL_CMD_QUEUE); + if (rc) { + d_vpr_e( + "qdss_alloc_fail: QDSS messages logging will not work\n"); + dev->qdss.align_device_addr = 0; + } else { + dev->qdss.align_device_addr = + mem_addr->align_device_addr - fw_bias; + dev->qdss.align_virtual_addr = + mem_addr->align_virtual_addr; + dev->qdss.mem_size = ALIGNED_QDSS_SIZE; + dev->qdss.mem_data = mem_addr->mem_data; + } + } + + rc = __smem_alloc(dev, mem_addr, + ALIGNED_SFR_SIZE, 1, SMEM_UNCACHED, + HAL_BUFFER_INTERNAL_CMD_QUEUE); + if (rc) { + d_vpr_e("sfr_alloc_fail: SFR not will work\n"); + dev->sfr.align_device_addr = 0; + } else { + dev->sfr.align_device_addr = mem_addr->align_device_addr - + fw_bias; + dev->sfr.align_virtual_addr = mem_addr->align_virtual_addr; + dev->sfr.mem_size = ALIGNED_SFR_SIZE; + dev->sfr.mem_data = mem_addr->mem_data; + vsfr = (struct hfi_sfr_struct *) dev->sfr.align_virtual_addr; + vsfr->bufSize = ALIGNED_SFR_SIZE; + 
} + + q_tbl_hdr = (struct hfi_queue_table_header *) + dev->iface_q_table.align_virtual_addr; + q_tbl_hdr->qtbl_version = 0; + q_tbl_hdr->device_addr = (void *)dev; + strlcpy(q_tbl_hdr->name, "msm_v4l2_vidc", sizeof(q_tbl_hdr->name)); + q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE; + q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_table_header); + q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header); + q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ; + q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ; + + iface_q = &dev->iface_queues[VIDC_IFACEQ_CMDQ_IDX]; + q_hdr = iface_q->q_hdr; + q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr; + q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q; + + iface_q = &dev->iface_queues[VIDC_IFACEQ_MSGQ_IDX]; + q_hdr = iface_q->q_hdr; + q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr; + q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q; + + iface_q = &dev->iface_queues[VIDC_IFACEQ_DBGQ_IDX]; + q_hdr = iface_q->q_hdr; + q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr; + q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q; + /* + * Set receive request to zero on debug queue as there is no + * need of interrupt from video hardware for debug messages + */ + q_hdr->qhdr_rx_req = 0; + + if (dev->qdss.align_virtual_addr) { + qdss = (struct hfi_mem_map_table *)dev->qdss.align_virtual_addr; + qdss->mem_map_num_entries = num_entries; + mem_map_table_base_addr = dev->qdss.align_device_addr + + sizeof(struct hfi_mem_map_table); + qdss->mem_map_table_base_addr = mem_map_table_base_addr; + + mem_map = (struct hfi_mem_map *)(qdss + 1); + cb = msm_smem_get_context_bank(MSM_VIDC_UNKNOWN, false, + dev->res, HAL_BUFFER_INTERNAL_CMD_QUEUE, DEFAULT_SID); + if (!cb) { + d_vpr_e("%s: failed to get context bank\n", __func__); + return -EINVAL; + } + + rc = __get_qdss_iommu_virtual_addr(dev, mem_map, cb->domain); + if (rc) { + d_vpr_e("IOMMU mapping failed, Freeing qdss memdata\n"); + __smem_free(dev, &dev->qdss.mem_data); + 
dev->qdss.align_virtual_addr = NULL; + dev->qdss.align_device_addr = 0; + } + } + + call_venus_op(dev, setup_ucregion_memmap, dev, DEFAULT_SID); + return 0; +fail_alloc_queue: + return -ENOMEM; +} + +static int __sys_set_debug(struct venus_hfi_device *device, u32 debug, u32 sid) +{ + u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; + int rc = 0; + struct hfi_cmd_sys_set_property_packet *pkt = + (struct hfi_cmd_sys_set_property_packet *) &packet; + + rc = call_hfi_pkt_op(device, sys_debug_config, pkt, debug); + if (rc) { + s_vpr_e(sid, "Debug mode setting to FW failed\n"); + return -ENOTEMPTY; + } + + if (__iface_cmdq_write(device, pkt, sid)) + return -ENOTEMPTY; + return 0; +} + +static int __sys_set_coverage(struct venus_hfi_device *device, + u32 mode, u32 sid) +{ + u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; + int rc = 0; + struct hfi_cmd_sys_set_property_packet *pkt = + (struct hfi_cmd_sys_set_property_packet *) &packet; + + rc = call_hfi_pkt_op(device, sys_coverage_config, + pkt, mode, sid); + if (rc) { + s_vpr_e(sid, "Coverage mode setting to FW failed\n"); + return -ENOTEMPTY; + } + + if (__iface_cmdq_write(device, pkt, sid)) { + s_vpr_e(sid, "Failed to send coverage pkt to f/w\n"); + return -ENOTEMPTY; + } + + return 0; +} + +static int __sys_set_power_control(struct venus_hfi_device *device, + bool enable, u32 sid) +{ + struct regulator_info *rinfo; + bool supported = false; + u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; + struct hfi_cmd_sys_set_property_packet *pkt = + (struct hfi_cmd_sys_set_property_packet *) &packet; + + venus_hfi_for_each_regulator(device, rinfo) { + if (rinfo->has_hw_power_collapse) { + supported = true; + break; + } + } + + if (!supported) + return 0; + + call_hfi_pkt_op(device, sys_power_control, pkt, enable); + if (__iface_cmdq_write(device, pkt, sid)) + return -ENOTEMPTY; + return 0; +} + +static int venus_hfi_core_init(void *device) +{ + int rc = 0; + struct hfi_cmd_sys_init_packet pkt; + struct hfi_cmd_sys_get_property_packet 
version_pkt; + struct venus_hfi_device *dev; + + if (!device) { + d_vpr_e("Invalid device\n"); + return -ENODEV; + } + + dev = device; + + d_vpr_h("Core initializing\n"); + + mutex_lock(&dev->lock); + + dev->bus_vote = DEFAULT_BUS_VOTE; + + rc = __load_fw(dev); + if (rc) { + d_vpr_e("Failed to load Venus FW\n"); + goto err_load_fw; + } + + __set_state(dev, VENUS_STATE_INIT); + + d_vpr_h("Dev_Virt: %pa, Reg_Virt: %pK\n", + &dev->hal_data->firmware_base, + dev->hal_data->register_base); + + + rc = __interface_queues_init(dev); + if (rc) { + d_vpr_e("failed to init queues\n"); + rc = -ENOMEM; + goto err_core_init; + } + + rc = call_venus_op(dev, boot_firmware, dev, DEFAULT_SID); + if (rc) { + d_vpr_e("Failed to start core\n"); + rc = -ENODEV; + goto err_core_init; + } + + rc = call_hfi_pkt_op(dev, sys_init, &pkt, HFI_VIDEO_ARCH_OX); + if (rc) { + d_vpr_e("Failed to create sys init pkt\n"); + goto err_core_init; + } + + if (__iface_cmdq_write(dev, &pkt, DEFAULT_SID)) { + rc = -ENOTEMPTY; + goto err_core_init; + } + + rc = call_hfi_pkt_op(dev, sys_image_version, &version_pkt); + if (rc || __iface_cmdq_write(dev, &version_pkt, DEFAULT_SID)) + d_vpr_e("Failed to send image version pkt to f/w\n"); + + __sys_set_debug(device, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT, + DEFAULT_SID); + + __enable_subcaches(device, DEFAULT_SID); + __set_subcaches(device, DEFAULT_SID); + + __set_ubwc_config(device); + + d_vpr_h("Core inited successfully\n"); + mutex_unlock(&dev->lock); + return rc; +err_core_init: + __set_state(dev, VENUS_STATE_DEINIT); + __unload_fw(dev); +err_load_fw: + d_vpr_e("Core init failed\n"); + mutex_unlock(&dev->lock); + return rc; +} + +static int venus_hfi_core_release(void *dev) +{ + int rc = 0; + struct venus_hfi_device *device = dev; + struct hal_session *session, *next; + + if (!device) { + d_vpr_e("invalid device\n"); + return -ENODEV; + } + + mutex_lock(&device->lock); + d_vpr_h("Core releasing\n"); + + __resume(device, DEFAULT_SID); + 
__set_state(device, VENUS_STATE_DEINIT); + + __unload_fw(device); + + /* unlink all sessions from device */ + list_for_each_entry_safe(session, next, &device->sess_head, list) + list_del(&session->list); + + d_vpr_h("Core released successfully\n"); + mutex_unlock(&device->lock); + + return rc; +} + +static int venus_hfi_core_ping(void *device, u32 sid) +{ + struct hfi_cmd_sys_ping_packet pkt; + int rc = 0; + struct venus_hfi_device *dev; + + if (!device) { + d_vpr_e("invalid device\n"); + return -ENODEV; + } + + dev = device; + mutex_lock(&dev->lock); + + rc = call_hfi_pkt_op(dev, sys_ping, &pkt, sid); + if (rc) { + d_vpr_e("core_ping: failed to create packet\n"); + goto err_create_pkt; + } + + if (__iface_cmdq_write(dev, &pkt, sid)) + rc = -ENOTEMPTY; + +err_create_pkt: + mutex_unlock(&dev->lock); + return rc; +} + +static int venus_hfi_core_trigger_ssr(void *device, + enum hal_ssr_trigger_type ssr_type, u32 sub_client_id, + u32 test_addr) +{ + struct hfi_cmd_sys_test_ssr_packet pkt; + int rc = 0; + struct venus_hfi_device *dev; + + if (!device) { + d_vpr_e("invalid device\n"); + return -ENODEV; + } + + dev = device; + mutex_lock(&dev->lock); + + rc = call_hfi_pkt_op(dev, ssr_cmd, &pkt, ssr_type, + sub_client_id, test_addr); + if (rc) { + d_vpr_e("core_ping: failed to create packet\n"); + goto err_create_pkt; + } + + if (__iface_cmdq_write(dev, &pkt, DEFAULT_SID)) + rc = -ENOTEMPTY; + +err_create_pkt: + mutex_unlock(&dev->lock); + return rc; +} + +static int venus_hfi_session_set_property(void *sess, + u32 ptype, void *pdata, u32 size) +{ + u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE]; + struct hfi_cmd_session_set_property_packet *pkt = + (struct hfi_cmd_session_set_property_packet *) &packet; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + + if (!__is_session_valid(device, session, __func__)) { + rc = -EINVAL; + goto err_set_prop; + } + s_vpr_h(session->sid, "in set_prop,with 
prop id: %#x\n", ptype); + + rc = call_hfi_pkt_op(device, session_set_property, + pkt, session->sid, ptype, pdata, size); + + if (rc == -ENOTSUPP) { + s_vpr_e(session->sid, + "set property: unsupported prop id: %#x\n", ptype); + rc = 0; + goto err_set_prop; + } else if (rc) { + s_vpr_e(session->sid, + "set property: failed to create packet\n"); + rc = -EINVAL; + goto err_set_prop; + } + + if (__iface_cmdq_write(device, pkt, session->sid)) { + rc = -ENOTEMPTY; + goto err_set_prop; + } + +err_set_prop: + mutex_unlock(&device->lock); + return rc; +} + +static void __set_default_sys_properties(struct venus_hfi_device *device, + u32 sid) +{ + if (__sys_set_debug(device, + (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT, sid)) + s_vpr_e(sid, "Setting fw_debug msg ON failed\n"); + if (__sys_set_power_control(device, true, sid)) + s_vpr_e(sid, "Setting h/w power collapse ON failed\n"); +} + +static void __session_clean(struct hal_session *session) +{ + struct hal_session *temp, *next; + struct venus_hfi_device *device = &venus_hfi_dev; + + if (!__is_session_valid(device, session, __func__)) + return; + s_vpr_h(session->sid, "deleted the session: %pK\n", session); + /* + * session might have been removed from the device list in + * core_release, so check and remove if it is in the list + */ + list_for_each_entry_safe(temp, next, &device->sess_head, list) { + if (session == temp) { + list_del(&session->list); + break; + } + } + /* Poison the session handle with zeros */ + *session = (struct hal_session){ {0} }; + kfree(session); +} + +static int venus_hfi_session_clean(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + mutex_lock(&device->lock); + __session_clean(session); + mutex_unlock(&device->lock); + return 0; +} + +static int venus_hfi_session_init(void *device, void *inst_id, + enum hal_domain session_type, enum hal_video_codec codec_type, + void **new_session, u32 sid) +{ + struct hfi_cmd_sys_session_init_packet 
pkt; + struct venus_hfi_device *dev; + struct hal_session *s; + + if (!device || !new_session) { + d_vpr_e("%s: invalid input\n", __func__); + return -EINVAL; + } + + dev = device; + mutex_lock(&dev->lock); + + s = kzalloc(sizeof(struct hal_session), GFP_KERNEL); + if (!s) { + s_vpr_e(sid, "new session fail: Out of memory\n"); + goto err_session_init_fail; + } + + s->inst_id = inst_id; + s->is_decoder = (session_type == HAL_VIDEO_DOMAIN_DECODER); + s->codec = codec_type; + s->domain = session_type; + s->sid = sid; + s_vpr_hp(sid, "%s: inst %pK, session %pK, codec 0x%x, domain 0x%x\n", + __func__, inst_id, s, s->codec, s->domain); + + list_add_tail(&s->list, &dev->sess_head); + + __set_default_sys_properties(device, sid); + + if (call_hfi_pkt_op(dev, session_init, &pkt, + sid, session_type, codec_type)) { + s_vpr_e(sid, "session_init: failed to create packet\n"); + goto err_session_init_fail; + } + + *new_session = s; + if (__iface_cmdq_write(dev, &pkt, sid)) + goto err_session_init_fail; + + mutex_unlock(&dev->lock); + return 0; + +err_session_init_fail: + if (s) + __session_clean(s); + *new_session = NULL; + mutex_unlock(&dev->lock); + return -EINVAL; +} + +static int __send_session_cmd(struct hal_session *session, int pkt_type) +{ + struct vidc_hal_session_cmd_pkt pkt; + int rc = 0; + struct venus_hfi_device *device = &venus_hfi_dev; + + if (!__is_session_valid(device, session, __func__)) + return -EINVAL; + + rc = call_hfi_pkt_op(device, session_cmd, + &pkt, pkt_type, session->sid); + if (rc == -EPERM) + return 0; + + if (rc) { + s_vpr_e(session->sid, "send session cmd: create pkt failed\n"); + goto err_create_pkt; + } + + if (__iface_cmdq_write(device, &pkt, session->sid)) + rc = -ENOTEMPTY; + +err_create_pkt: + return rc; +} + +static int venus_hfi_session_end(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + if (!__is_session_valid(device, session, __func__)) 
{ + rc = -EINVAL; + goto exit; + } + + if (msm_vidc_fw_coverage) { + if (__sys_set_coverage(device, msm_vidc_fw_coverage, + session->sid)) + s_vpr_e(session->sid, "Fw_coverage msg ON failed\n"); + } + rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_END); +exit: + mutex_unlock(&device->lock); + return rc; +} + +static int venus_hfi_session_abort(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + + __flush_debug_queue(device, NULL); + rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT); + + mutex_unlock(&device->lock); + + return rc; +} + +static int venus_hfi_session_set_buffers(void *sess, + struct vidc_buffer_addr_info *buffer_info) +{ + struct hfi_cmd_session_set_buffers_packet *pkt; + u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE]; + int rc = 0; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + if (!buffer_info) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + mutex_lock(&device->lock); + + if (!__is_session_valid(device, session, __func__)) { + rc = -EINVAL; + goto err_create_pkt; + } + if (buffer_info->buffer_type == HAL_BUFFER_INPUT) { + /* + * Hardware doesn't care about input buffers being + * published beforehand + */ + rc = 0; + goto err_create_pkt; + } + + pkt = (struct hfi_cmd_session_set_buffers_packet *)packet; + + rc = call_hfi_pkt_op(device, session_set_buffers, + pkt, session->sid, buffer_info); + if (rc) { + s_vpr_e(session->sid, "set buffers: failed to create packet\n"); + goto err_create_pkt; + } + + s_vpr_h(session->sid, "set buffers: %#x\n", buffer_info->buffer_type); + if (__iface_cmdq_write(device, pkt, session->sid)) + rc = -ENOTEMPTY; + +err_create_pkt: + mutex_unlock(&device->lock); + return rc; +} + +static int venus_hfi_session_release_buffers(void *sess, + struct vidc_buffer_addr_info *buffer_info) +{ + struct hfi_cmd_session_release_buffer_packet *pkt; + u8 
packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE]; + int rc = 0; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + if (!buffer_info) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + mutex_lock(&device->lock); + + if (!__is_session_valid(device, session, __func__)) { + rc = -EINVAL; + goto err_create_pkt; + } + if (buffer_info->buffer_type == HAL_BUFFER_INPUT) { + rc = 0; + goto err_create_pkt; + } + + pkt = (struct hfi_cmd_session_release_buffer_packet *) packet; + + rc = call_hfi_pkt_op(device, session_release_buffers, + pkt, session->sid, buffer_info); + if (rc) { + s_vpr_e(session->sid, "%s: failed to create packet\n", + __func__); + goto err_create_pkt; + } + + s_vpr_h(session->sid, "Release buffers: %#x\n", + buffer_info->buffer_type); + if (__iface_cmdq_write(device, pkt, session->sid)) + rc = -ENOTEMPTY; + +err_create_pkt: + mutex_unlock(&device->lock); + return rc; +} + +static int venus_hfi_session_load_res(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + rc = __send_session_cmd(session, HFI_CMD_SESSION_LOAD_RESOURCES); + mutex_unlock(&device->lock); + + return rc; +} + +static int venus_hfi_session_release_res(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + rc = __send_session_cmd(session, HFI_CMD_SESSION_RELEASE_RESOURCES); + mutex_unlock(&device->lock); + + return rc; +} + +static int venus_hfi_session_start(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + rc = __send_session_cmd(session, HFI_CMD_SESSION_START); + mutex_unlock(&device->lock); + + return rc; +} + +static int venus_hfi_session_continue(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = 
&venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + rc = __send_session_cmd(session, HFI_CMD_SESSION_CONTINUE); + mutex_unlock(&device->lock); + + return rc; +} + +static int venus_hfi_session_stop(void *sess) +{ + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + int rc = 0; + + mutex_lock(&device->lock); + rc = __send_session_cmd(session, HFI_CMD_SESSION_STOP); + mutex_unlock(&device->lock); + + return rc; +} + +static int __session_etb(struct hal_session *session, + struct vidc_frame_data *input_frame, bool relaxed) +{ + int rc = 0; + struct venus_hfi_device *device = &venus_hfi_dev; + + if (!__is_session_valid(device, session, __func__)) + return -EINVAL; + + if (session->is_decoder) { + struct hfi_cmd_session_empty_buffer_compressed_packet pkt; + + rc = call_hfi_pkt_op(device, session_etb_decoder, + &pkt, session->sid, input_frame); + if (rc) { + s_vpr_e(session->sid, + "etb decoder: failed to create pkt\n"); + goto err_create_pkt; + } + + if (!relaxed) + rc = __iface_cmdq_write(device, &pkt, session->sid); + else + rc = __iface_cmdq_write_relaxed(device, + &pkt, NULL, session->sid); + if (rc) + goto err_create_pkt; + } else { + struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet + pkt; + + rc = call_hfi_pkt_op(device, session_etb_encoder, + &pkt, session->sid, input_frame); + if (rc) { + s_vpr_e(session->sid, + "etb encoder: failed to create pkt\n"); + goto err_create_pkt; + } + + if (!relaxed) + rc = __iface_cmdq_write(device, &pkt, session->sid); + else + rc = __iface_cmdq_write_relaxed(device, + &pkt, NULL, session->sid); + if (rc) + goto err_create_pkt; + } + +err_create_pkt: + return rc; +} + +static int venus_hfi_session_etb(void *sess, + struct vidc_frame_data *input_frame) +{ + int rc = 0; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + if (!input_frame) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + 
mutex_lock(&device->lock); + rc = __session_etb(session, input_frame, false); + mutex_unlock(&device->lock); + return rc; +} + +static int __session_ftb(struct hal_session *session, + struct vidc_frame_data *output_frame, bool relaxed) +{ + int rc = 0; + struct venus_hfi_device *device = &venus_hfi_dev; + struct hfi_cmd_session_fill_buffer_packet pkt; + + if (!__is_session_valid(device, session, __func__)) + return -EINVAL; + + rc = call_hfi_pkt_op(device, session_ftb, + &pkt, session->sid, output_frame); + if (rc) { + s_vpr_e(session->sid, "Session ftb: failed to create pkt\n"); + goto err_create_pkt; + } + + if (!relaxed) + rc = __iface_cmdq_write(device, &pkt, session->sid); + else + rc = __iface_cmdq_write_relaxed(device, + &pkt, NULL, session->sid); + +err_create_pkt: + return rc; +} + +static int venus_hfi_session_ftb(void *sess, + struct vidc_frame_data *output_frame) +{ + int rc = 0; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + if (!output_frame) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + mutex_lock(&device->lock); + rc = __session_ftb(session, output_frame, false); + mutex_unlock(&device->lock); + return rc; +} + +static int venus_hfi_session_process_batch(void *sess, + int num_etbs, struct vidc_frame_data etbs[], + int num_ftbs, struct vidc_frame_data ftbs[]) +{ + int rc = 0, c = 0; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + bool is_last_frame = false; + + mutex_lock(&device->lock); + + if (!__is_session_valid(device, session, __func__)) { + rc = -EINVAL; + goto err_etbs_and_ftbs; + } + + for (c = 0; c < num_ftbs; ++c) { + is_last_frame = (c + 1 == num_ftbs); + rc = __session_ftb(session, &ftbs[c], !is_last_frame); + if (rc) { + s_vpr_e(session->sid, + "Failed to queue batched ftb: %d\n", rc); + goto err_etbs_and_ftbs; + } + } + + for (c = 0; c < num_etbs; ++c) { + is_last_frame = (c + 1 == num_etbs); + rc = 
__session_etb(session, &etbs[c], !is_last_frame); + if (rc) { + s_vpr_e(session->sid, + "Failed to queue batched etb: %d\n", rc); + goto err_etbs_and_ftbs; + } + } + +err_etbs_and_ftbs: + mutex_unlock(&device->lock); + return rc; +} + +static int venus_hfi_session_get_buf_req(void *sess) +{ + struct hfi_cmd_session_get_property_packet pkt; + int rc = 0; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + mutex_lock(&device->lock); + + if (!__is_session_valid(device, session, __func__)) { + rc = -ENODEV; + goto err_create_pkt; + } + rc = call_hfi_pkt_op(device, session_get_buf_req, + &pkt, session->sid); + if (rc) { + s_vpr_e(session->sid, "%s: failed to create pkt\n", __func__); + goto err_create_pkt; + } + + if (__iface_cmdq_write(device, &pkt, session->sid)) + rc = -ENOTEMPTY; +err_create_pkt: + mutex_unlock(&device->lock); + return rc; +} + +static int venus_hfi_session_flush(void *sess, enum hal_flush flush_mode) +{ + struct hfi_cmd_session_flush_packet pkt; + int rc = 0; + struct hal_session *session = sess; + struct venus_hfi_device *device = &venus_hfi_dev; + + mutex_lock(&device->lock); + if (!__is_session_valid(device, session, __func__)) { + rc = -ENODEV; + goto err_create_pkt; + } + rc = call_hfi_pkt_op(device, session_flush, + &pkt, session->sid, flush_mode); + if (rc) { + s_vpr_e(session->sid, "Session flush: failed to create pkt\n"); + goto err_create_pkt; + } + + if (__iface_cmdq_write(device, &pkt, session->sid)) + rc = -ENOTEMPTY; +err_create_pkt: + mutex_unlock(&device->lock); + return rc; +} + +static int __check_core_registered(struct hal_device_data core, + phys_addr_t fw_addr, u8 *reg_addr, u32 reg_size, + phys_addr_t irq) +{ + struct venus_hfi_device *device; + struct hal_data *hal_data; + struct list_head *curr, *next; + + if (!core.dev_count) { + d_vpr_e("no device Registered\n"); + return -EINVAL; + } + + list_for_each_safe(curr, next, &core.dev_head) { + device = list_entry(curr, + struct 
venus_hfi_device, list); + hal_data = device->hal_data; + if (hal_data && hal_data->irq == irq && + (CONTAINS(hal_data->firmware_base, + FIRMWARE_SIZE, fw_addr) || + CONTAINS(fw_addr, FIRMWARE_SIZE, + hal_data->firmware_base) || + CONTAINS(hal_data->register_base, + reg_size, reg_addr) || + CONTAINS(reg_addr, reg_size, + hal_data->register_base) || + OVERLAPS(hal_data->register_base, + reg_size, reg_addr, reg_size) || + OVERLAPS(reg_addr, reg_size, + hal_data->register_base, + reg_size) || + OVERLAPS(hal_data->firmware_base, + FIRMWARE_SIZE, fw_addr, + FIRMWARE_SIZE) || + OVERLAPS(fw_addr, FIRMWARE_SIZE, + hal_data->firmware_base, + FIRMWARE_SIZE))) { + return 0; + } + + d_vpr_e("Device not registered\n"); + return -EINVAL; + } + return -EINVAL; +} + +static void __process_fatal_error( + struct venus_hfi_device *device) +{ + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + cmd_done.device_id = device->device_id; + device->callback(HAL_SYS_ERROR, &cmd_done); +} + +int __prepare_pc(struct venus_hfi_device *device) +{ + int rc = 0; + struct hfi_cmd_sys_pc_prep_packet pkt; + + rc = call_hfi_pkt_op(device, sys_pc_prep, &pkt); + if (rc) { + d_vpr_e("Failed to create sys pc prep pkt\n"); + goto err_pc_prep; + } + + if (__iface_cmdq_write(device, &pkt, DEFAULT_SID)) + rc = -ENOTEMPTY; + if (rc) + d_vpr_e("Failed to prepare venus for power off"); +err_pc_prep: + return rc; +} + +static void venus_hfi_pm_handler(struct work_struct *work) +{ + int rc = 0; + struct venus_hfi_device *device = list_first_entry( + &hal_ctxt.dev_head, struct venus_hfi_device, list); + + if (!device) { + d_vpr_e("%s: NULL device\n", __func__); + return; + } + + d_vpr_h("Entering %s\n", __func__); + /* + * It is ok to check this variable outside the lock since + * it is being updated in this context only + */ + if (device->skip_pc_count >= VIDC_MAX_PC_SKIP_COUNT) { + d_vpr_e("Failed to PC for %d times\n", + device->skip_pc_count); + device->skip_pc_count = 0; + __process_fatal_error(device); + 
return; + } + + mutex_lock(&device->lock); + rc = __power_collapse(device, false); + mutex_unlock(&device->lock); + switch (rc) { + case 0: + device->skip_pc_count = 0; + /* Cancel pending delayed works if any */ + cancel_delayed_work(&venus_hfi_pm_work); + d_vpr_h("%s: power collapse successful!\n", __func__); + break; + case -EBUSY: + device->skip_pc_count = 0; + d_vpr_h("%s: retry PC as dsp is busy\n", __func__); + queue_delayed_work(device->venus_pm_workq, + &venus_hfi_pm_work, msecs_to_jiffies( + device->res->msm_vidc_pwr_collapse_delay)); + break; + case -EAGAIN: + device->skip_pc_count++; + d_vpr_e("%s: retry power collapse (count %d)\n", + __func__, device->skip_pc_count); + queue_delayed_work(device->venus_pm_workq, + &venus_hfi_pm_work, msecs_to_jiffies( + device->res->msm_vidc_pwr_collapse_delay)); + break; + default: + d_vpr_e("%s: power collapse failed\n", __func__); + break; + } +} + +static int __power_collapse(struct venus_hfi_device *device, bool force) +{ + int rc = 0; + + if (!device) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + if (!device->power_enabled) { + d_vpr_h("%s: Power already disabled\n", __func__); + goto exit; + } + + if (!__core_in_valid_state(device)) { + d_vpr_e("%s: Core not in init state\n", __func__); + return -EINVAL; + } + + rc = call_venus_op(device, prepare_pc, device); + if (rc) + goto skip_power_off; + + __flush_debug_queue(device, device->raw_packet); + + rc = __suspend(device); + if (rc) + d_vpr_e("Failed __suspend\n"); + +exit: + return rc; + +skip_power_off: + return -EAGAIN; +} + +static void print_sfr_message(struct venus_hfi_device *device) +{ + struct hfi_sfr_struct *vsfr = NULL; + u32 vsfr_size = 0; + void *p = NULL; + + vsfr = (struct hfi_sfr_struct *)device->sfr.align_virtual_addr; + if (vsfr) { + if (vsfr->bufSize != device->sfr.mem_size) { + d_vpr_e("Invalid SFR buf size %d actual %d\n", + vsfr->bufSize, device->sfr.mem_size); + return; + } + vsfr_size = vsfr->bufSize - sizeof(u32); 
p = memchr(vsfr->rg_data, '\0', vsfr_size);
		/* SFR isn't guaranteed to be NULL terminated */
		if (p == NULL)
			vsfr->rg_data[vsfr_size - 1] = '\0';

		d_vpr_e("SFR Message from FW: %s\n", vsfr->rg_data);
	}
}

/*
 * Drain the firmware debug message queue and print each message at the
 * current debug log level. When @packet is NULL (error paths), a
 * temporary buffer is allocated and the messages are additionally
 * mirrored to printk via FW_PRINTK.
 */
static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
{
	bool local_packet = false;
	enum vidc_msg_prio log_level = msm_vidc_debug;

	if (!device) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!packet) {
		packet = kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
		if (!packet) {
			d_vpr_e("In %s() Fail to allocate mem\n", __func__);
			return;
		}

		local_packet = true;

		/*
		 * Local packet is used when error occurred.
		 * It is good to print these logs to printk as well.
		 */
		log_level |= FW_PRINTK;
	}

/* Skip packets whose payload cannot fit inside the advertised size. */
#define SKIP_INVALID_PKT(pkt_size, payload_size, pkt_hdr_size) ({ \
	if (pkt_size < pkt_hdr_size || \
		payload_size < MIN_PAYLOAD_SIZE || \
		payload_size > \
			(pkt_size - pkt_hdr_size + sizeof(u8))) { \
		d_vpr_e("%s: invalid msg size - %d\n", \
			__func__, pkt->msg_size); \
		continue; \
	} \
	})

	while (!__iface_dbgq_read(device, packet)) {
		struct hfi_packet_header *pkt =
			(struct hfi_packet_header *) packet;

		if (pkt->size < sizeof(struct hfi_packet_header)) {
			d_vpr_e("Invalid pkt size - %s\n", __func__);
			continue;
		}

		if (pkt->packet_type == HFI_MSG_SYS_DEBUG) {
			struct hfi_msg_sys_debug_packet *pkt =
				(struct hfi_msg_sys_debug_packet *) packet;

			SKIP_INVALID_PKT(pkt->size,
				pkt->msg_size, sizeof(*pkt));

			/*
			 * All fw messages starts with new line character. This
			 * causes dprintk to print this message in two lines
			 * in the kernel log. Ignoring the first character
			 * from the message fixes this to print it in a single
			 * line.
+ */ + pkt->rg_msg_data[pkt->msg_size-1] = '\0'; + dprintk_firmware(log_level, "%s", &pkt->rg_msg_data[1]); + } + } +#undef SKIP_INVALID_PKT + + if (local_packet) + kfree(packet); +} + +static bool __is_session_valid(struct venus_hfi_device *device, + struct hal_session *session, const char *func) +{ + struct hal_session *temp = NULL; + + if (!device || !session) + goto invalid; + + list_for_each_entry(temp, &device->sess_head, list) + if (session == temp) + return true; + +invalid: + d_vpr_e("%s: device %pK, invalid session %pK\n", func, device, session); + return false; +} + +static struct hal_session *__get_session(struct venus_hfi_device *device, + u32 sid) +{ + struct hal_session *temp = NULL; + + list_for_each_entry(temp, &device->sess_head, list) { + if (sid == temp->sid) + return temp; + } + + return NULL; +} + +static bool __watchdog_common(u32 intr_status) +{ + bool rc = false; + + if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK) + rc = true; + + return rc; +} + +static int __response_handler(struct venus_hfi_device *device) +{ + struct msm_vidc_cb_info *packets; + int packet_count = 0; + u8 *raw_packet = NULL; + bool requeue_pm_work = true; + + if (!device || device->state != VENUS_STATE_INIT) + return 0; + + packets = device->response_pkt; + + raw_packet = device->raw_packet; + + if (!raw_packet || !packets) { + d_vpr_e("%s: Invalid args %pK, %pK\n", + __func__, raw_packet, packets); + return 0; + } + + if (call_venus_op(device, watchdog, device->intr_status)) { + struct msm_vidc_cb_info info = { + .response_type = HAL_SYS_WATCHDOG_TIMEOUT, + .response.cmd = { + .device_id = device->device_id, + } + }; + + print_sfr_message(device); + + d_vpr_e("Received watchdog timeout\n"); + packets[packet_count++] = info; + goto exit; + } + + /* Bleed the msg queue dry of packets */ + while (!__iface_msgq_read(device, raw_packet)) { + void **inst_id = NULL; + struct msm_vidc_cb_info *info = &packets[packet_count++]; + int rc = 0; + + rc = 
hfi_process_msg_packet(device->device_id, + (struct vidc_hal_msg_pkt_hdr *)raw_packet, info); + if (rc) { + d_vpr_e("Corrupt/unknown packet found, discarding\n"); + --packet_count; + continue; + } + + /* Process the packet types that we're interested in */ + switch (info->response_type) { + case HAL_SYS_ERROR: + print_sfr_message(device); + break; + case HAL_SYS_RELEASE_RESOURCE_DONE: + d_vpr_h("Received SYS_RELEASE_RESOURCE\n"); + break; + case HAL_SYS_INIT_DONE: + d_vpr_h("Received SYS_INIT_DONE\n"); + break; + case HAL_SESSION_LOAD_RESOURCE_DONE: + break; + default: + break; + } + + /* For session-related packets, validate session */ + switch (info->response_type) { + case HAL_SESSION_LOAD_RESOURCE_DONE: + case HAL_SESSION_INIT_DONE: + case HAL_SESSION_END_DONE: + case HAL_SESSION_ABORT_DONE: + case HAL_SESSION_START_DONE: + case HAL_SESSION_STOP_DONE: + case HAL_SESSION_FLUSH_DONE: + case HAL_SESSION_SUSPEND_DONE: + case HAL_SESSION_RESUME_DONE: + case HAL_SESSION_SET_PROP_DONE: + case HAL_SESSION_GET_PROP_DONE: + case HAL_SESSION_RELEASE_BUFFER_DONE: + case HAL_SESSION_RELEASE_RESOURCE_DONE: + case HAL_SESSION_PROPERTY_INFO: + case HAL_SYS_PING_ACK: + inst_id = &info->response.cmd.inst_id; + break; + case HAL_SESSION_ERROR: + case HAL_SESSION_ETB_DONE: + case HAL_SESSION_FTB_DONE: + inst_id = &info->response.data.inst_id; + break; + case HAL_SESSION_EVENT_CHANGE: + inst_id = &info->response.event.inst_id; + break; + case HAL_RESPONSE_UNUSED: + default: + inst_id = NULL; + break; + } + + /* + * hfi_process_msg_packet provides a sid, we need to coerce + * the sid value back to pointer(inst_id) that we can + * use. 
Ideally, hfi_process_msg_packet should take care of + * this, but it doesn't have required information for it + */ + if (inst_id) { + struct hal_session *session = NULL; + + if (upper_32_bits((uintptr_t)*inst_id) != 0) { + d_vpr_e("Upper 32-bits != 0 for sess_id=%pK\n", + *inst_id); + } + session = __get_session(device, + (u32)(uintptr_t)*inst_id); + if (!session) { + d_vpr_e( + "Received a packet (%#x) for an unrecognized session (%pK), discarding\n", + info->response_type, *inst_id); + --packet_count; + continue; + } + + *inst_id = session->inst_id; + } + + if (packet_count >= max_packets) { + d_vpr_e( + "Too many packets in message queue to handle at once, deferring read\n"); + break; + } + + /* do not read packets after sys error packet */ + if (info->response_type == HAL_SYS_ERROR) + break; + } + + if (requeue_pm_work && device->res->sw_power_collapsible) { + cancel_delayed_work(&venus_hfi_pm_work); + if (!queue_delayed_work(device->venus_pm_workq, + &venus_hfi_pm_work, + msecs_to_jiffies( + device->res->msm_vidc_pwr_collapse_delay))) { + d_vpr_e("PM work already scheduled\n"); + } + } + +exit: + __flush_debug_queue(device, raw_packet); + + return packet_count; +} + +static void venus_hfi_core_work_handler(struct work_struct *work) +{ + struct venus_hfi_device *device = list_first_entry( + &hal_ctxt.dev_head, struct venus_hfi_device, list); + int num_responses = 0, i = 0; + u32 intr_status; + + mutex_lock(&device->lock); + if (!__core_in_valid_state(device)) { + d_vpr_e("%s: Core not in init state\n", __func__); + goto err_no_work; + } + + if (!device->callback) { + d_vpr_e("No interrupt callback function: %pK\n", + device); + goto err_no_work; + } + + if (__resume(device, DEFAULT_SID)) { + d_vpr_e("%s: Power enable failed\n", __func__); + goto err_no_work; + } + + call_venus_op(device, core_clear_interrupt, device); + num_responses = __response_handler(device); + +err_no_work: + + /* Keep the interrupt status before releasing device lock */ + intr_status = 
device->intr_status; + mutex_unlock(&device->lock); + + /* + * Issue the callbacks outside of the locked contex to preserve + * re-entrancy. + */ + + for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) && + i < num_responses; ++i) { + struct msm_vidc_cb_info *r = &device->response_pkt[i]; + + if (!__core_in_valid_state(device)) { + d_vpr_e( + "Ignore responses from %d to %d as device is in invalid state", + (i + 1), num_responses); + break; + } + device->callback(r->response_type, &r->response); + } + + /* We need re-enable the irq which was disabled in ISR handler */ + if (!call_venus_op(device, watchdog, intr_status)) + enable_irq(device->hal_data->irq); + + /* + * XXX: Don't add any code beyond here. Reacquiring locks after release + * it above doesn't guarantee the atomicity that we're aiming for. + */ +} + +static DECLARE_WORK(venus_hfi_work, venus_hfi_core_work_handler); + +static irqreturn_t venus_hfi_isr(int irq, void *dev) +{ + struct venus_hfi_device *device = dev; + + disable_irq_nosync(irq); + queue_work(device->vidc_workq, &venus_hfi_work); + return IRQ_HANDLED; +} + +static int __init_regs_and_interrupts(struct venus_hfi_device *device, + struct msm_vidc_platform_resources *res) +{ + struct hal_data *hal = NULL; + int rc = 0; + + rc = __check_core_registered(hal_ctxt, res->firmware_base, + (u8 *)(uintptr_t)res->register_base, + res->register_size, res->irq); + if (!rc) { + d_vpr_e("Core present/Already added\n"); + rc = -EEXIST; + goto err_core_init; + } + + d_vpr_h("HAL_DATA will be assigned now\n"); + hal = kzalloc(sizeof(struct hal_data), GFP_KERNEL); + if (!hal) { + d_vpr_e("Failed to alloc\n"); + rc = -ENOMEM; + goto err_core_init; + } + + hal->irq = res->irq; + hal->firmware_base = res->firmware_base; + hal->register_base = devm_ioremap_nocache(&res->pdev->dev, + res->register_base, res->register_size); + hal->register_size = res->register_size; + if (!hal->register_base) { + d_vpr_e("could not map reg addr %pa of size %d\n", + 
&res->register_base, res->register_size); + goto error_irq_fail; + } + + device->hal_data = hal; + rc = request_irq(res->irq, venus_hfi_isr, IRQF_TRIGGER_HIGH, + "msm_vidc", device); + if (unlikely(rc)) { + d_vpr_e("%s: request_irq failed\n", __func__); + goto error_irq_fail; + } + + disable_irq_nosync(res->irq); + d_vpr_h("firmware_base = %pa, reg_base = %pa, reg_size = %d\n", + &res->firmware_base, &res->register_base, + res->register_size); + + return rc; + +error_irq_fail: + kfree(hal); +err_core_init: + return rc; + +} + +static inline void __deinit_clocks(struct venus_hfi_device *device) +{ + struct clock_info *cl; + + device->clk_freq = 0; + venus_hfi_for_each_clock_reverse(device, cl) { + if (cl->clk) { + clk_put(cl->clk); + cl->clk = NULL; + } + } +} + +static inline int __init_clocks(struct venus_hfi_device *device) +{ + int rc = 0; + struct clock_info *cl = NULL; + + if (!device) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + venus_hfi_for_each_clock(device, cl) { + d_vpr_h("%s: scalable? %d, count %d\n", + cl->name, cl->has_scaling, cl->count); + } + + venus_hfi_for_each_clock(device, cl) { + if (!cl->clk) { + cl->clk = clk_get(&device->res->pdev->dev, cl->name); + if (IS_ERR_OR_NULL(cl->clk)) { + d_vpr_e("Failed to get clock: %s\n", cl->name); + rc = PTR_ERR(cl->clk) ? 
+ PTR_ERR(cl->clk) : -EINVAL; + cl->clk = NULL; + goto err_clk_get; + } + } + } + device->clk_freq = 0; + return 0; + +err_clk_get: + __deinit_clocks(device); + return rc; +} + +static int __handle_reset_clk(struct msm_vidc_platform_resources *res, + int reset_index, enum reset_state state, u32 sid) +{ + int rc = 0; + struct reset_control *rst; + struct reset_set *rst_set = &res->reset_set; + + if (!rst_set->reset_tbl) + return 0; + + rst = rst_set->reset_tbl[reset_index].rst; + s_vpr_h(sid, "reset_clk: name %s reset_state %d rst %pK\n", + rst_set->reset_tbl[reset_index].name, state, rst); + + switch (state) { + case INIT: + if (rst) + goto skip_reset_init; + + rst = devm_reset_control_get(&res->pdev->dev, + rst_set->reset_tbl[reset_index].name); + if (IS_ERR(rst)) + rc = PTR_ERR(rst); + + rst_set->reset_tbl[reset_index].rst = rst; + break; + case ASSERT: + if (!rst) { + rc = PTR_ERR(rst); + goto failed_to_reset; + } + + rc = reset_control_assert(rst); + break; + case DEASSERT: + if (!rst) { + rc = PTR_ERR(rst); + goto failed_to_reset; + } + rc = reset_control_deassert(rst); + break; + default: + s_vpr_e(sid, "Invalid reset request\n"); + if (rc) + goto failed_to_reset; + } + + return 0; + +skip_reset_init: +failed_to_reset: + return rc; +} + +void __disable_unprepare_clks(struct venus_hfi_device *device) +{ + struct clock_info *cl; + + if (!device) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + venus_hfi_for_each_clock_reverse(device, cl) { + d_vpr_h("Clock: %s disable and unprepare\n", + cl->name); + + if (!__clk_is_enabled(cl->clk)) + d_vpr_e("%s: clock %s already disabled\n", + __func__, cl->name); + + clk_disable_unprepare(cl->clk); + + if (__clk_is_enabled(cl->clk)) + d_vpr_e("%s: clock %s not disabled\n", + __func__, cl->name); + } +} + +int __reset_ahb2axi_bridge_common(struct venus_hfi_device *device, u32 sid) +{ + int rc, i; + + if (!device) { + s_vpr_e(sid, "NULL device\n"); + rc = -EINVAL; + goto failed_to_reset; + } + + for (i = 0; i 
< device->res->reset_set.count; i++) { + rc = __handle_reset_clk(device->res, i, ASSERT, sid); + if (rc) { + s_vpr_e(sid, "failed to assert reset clocks\n"); + goto failed_to_reset; + } + + /* wait for deassert */ + usleep_range(400, 500); + + rc = __handle_reset_clk(device->res, i, DEASSERT, sid); + if (rc) { + s_vpr_e(sid, "failed to deassert reset clocks\n"); + goto failed_to_reset; + } + } + + return 0; + +failed_to_reset: + return rc; +} + +static inline int __prepare_enable_clks(struct venus_hfi_device *device, + u32 sid) +{ + struct clock_info *cl = NULL, *cl_fail = NULL; + int rc = 0, c = 0; + + if (!device) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return -EINVAL; + } + + venus_hfi_for_each_clock(device, cl) { + /* + * For the clocks we control, set the rate prior to preparing + * them. Since we don't really have a load at this point, scale + * it to the lowest frequency possible + */ + if (cl->has_scaling) + __set_clk_rate(device, cl, + clk_round_rate(cl->clk, 0), sid); + + if (__clk_is_enabled(cl->clk)) + s_vpr_e(sid, "%s: clock %s already enabled\n", + __func__, cl->name); + + rc = clk_prepare_enable(cl->clk); + if (rc) { + s_vpr_e(sid, "Failed to enable clocks\n"); + cl_fail = cl; + goto fail_clk_enable; + } + + if (!__clk_is_enabled(cl->clk)) + s_vpr_e(sid, "%s: clock %s not enabled\n", + __func__, cl->name); + + c++; + s_vpr_h(sid, "Clock: %s prepared and enabled\n", cl->name); + } + + call_venus_op(device, clock_config_on_enable, device, sid); + return rc; + +fail_clk_enable: + venus_hfi_for_each_clock_reverse_continue(device, cl, c) { + s_vpr_e(sid, "Clock: %s disable and unprepare\n", + cl->name); + clk_disable_unprepare(cl->clk); + } + + return rc; +} + +static void __deinit_bus(struct venus_hfi_device *device) +{ + struct bus_info *bus = NULL; + + if (!device) + return; + + device->bus_vote = DEFAULT_BUS_VOTE; + + venus_hfi_for_each_bus_reverse(device, bus) { + icc_put(bus->path); + bus->path = NULL; + } +} + +static int 
__init_bus(struct venus_hfi_device *device) +{ + struct bus_info *bus = NULL; + int rc = 0; + + if (!device) + return -EINVAL; + + venus_hfi_for_each_bus(device, bus) { + if (!strcmp(bus->name, "venus-llcc")) { + if (msm_vidc_syscache_disable) { + d_vpr_h("Skipping LLC bus init: %s\n", + bus->name); + continue; + } + } + bus->path = of_icc_get(bus->dev, bus->name); + if (IS_ERR_OR_NULL(bus->path)) { + rc = PTR_ERR(bus->path) ? + PTR_ERR(bus->path) : -EBADHANDLE; + + d_vpr_e("Failed to register bus %s: %d\n", + bus->name, rc); + bus->path = NULL; + goto err_add_dev; + } + } + + return 0; + +err_add_dev: + __deinit_bus(device); + return rc; +} + +static void __deinit_regulators(struct venus_hfi_device *device) +{ + struct regulator_info *rinfo = NULL; + + venus_hfi_for_each_regulator_reverse(device, rinfo) { + if (rinfo->regulator) { + regulator_put(rinfo->regulator); + rinfo->regulator = NULL; + } + } +} + +static int __init_regulators(struct venus_hfi_device *device) +{ + int rc = 0; + struct regulator_info *rinfo = NULL; + + venus_hfi_for_each_regulator(device, rinfo) { + rinfo->regulator = regulator_get(&device->res->pdev->dev, + rinfo->name); + if (IS_ERR_OR_NULL(rinfo->regulator)) { + rc = PTR_ERR(rinfo->regulator) ? 
+ PTR_ERR(rinfo->regulator) : -EBADHANDLE; + d_vpr_e("Failed to get regulator: %s\n", rinfo->name); + rinfo->regulator = NULL; + goto err_reg_get; + } + } + + return 0; + +err_reg_get: + __deinit_regulators(device); + return rc; +} + +static void __deinit_subcaches(struct venus_hfi_device *device) +{ + struct subcache_info *sinfo = NULL; + + if (!device) { + d_vpr_e("deinit_subcaches: invalid device %pK\n", device); + goto exit; + } + + if (!is_sys_cache_present(device)) + goto exit; + + venus_hfi_for_each_subcache_reverse(device, sinfo) { + if (sinfo->subcache) { + d_vpr_h("deinit_subcaches: %s\n", sinfo->name); + llcc_slice_putd(sinfo->subcache); + sinfo->subcache = NULL; + } + } + +exit: + return; +} + +static int __init_subcaches(struct venus_hfi_device *device) +{ + int rc = 0; + struct subcache_info *sinfo = NULL; + + if (!device) { + d_vpr_e("init_subcaches: invalid device %pK\n", + device); + return -EINVAL; + } + + if (!is_sys_cache_present(device)) + return 0; + + venus_hfi_for_each_subcache(device, sinfo) { + if (!strcmp("vidsc0", sinfo->name)) { + sinfo->subcache = llcc_slice_getd(LLCC_VIDSC0); + } else if (!strcmp("vidsc1", sinfo->name)) { + sinfo->subcache = llcc_slice_getd(LLCC_VIDSC1); + } else if (!strcmp("vidscfw", sinfo->name)) { + sinfo->subcache = llcc_slice_getd(LLCC_VIDFW); + } else { + d_vpr_e("Invalid subcache name %s\n", + sinfo->name); + } + if (IS_ERR_OR_NULL(sinfo->subcache)) { + rc = PTR_ERR(sinfo->subcache) ? 
+ PTR_ERR(sinfo->subcache) : -EBADHANDLE; + d_vpr_e("init_subcaches: invalid subcache: %s rc %d\n", + sinfo->name, rc); + sinfo->subcache = NULL; + goto err_subcache_get; + } + d_vpr_h("init_subcaches: %s\n", sinfo->name); + } + + return 0; + +err_subcache_get: + __deinit_subcaches(device); + return rc; +} + +static int __init_resources(struct venus_hfi_device *device, + struct msm_vidc_platform_resources *res) +{ + int i, rc = 0; + + rc = __init_regulators(device); + if (rc) { + d_vpr_e("Failed to get all regulators\n"); + return -ENODEV; + } + + rc = __init_clocks(device); + if (rc) { + d_vpr_e("Failed to init clocks\n"); + rc = -ENODEV; + goto err_init_clocks; + } + + for (i = 0; i < device->res->reset_set.count; i++) { + rc = __handle_reset_clk(res, i, INIT, DEFAULT_SID); + if (rc) { + d_vpr_e("Failed to init reset clocks\n"); + rc = -ENODEV; + goto err_init_reset_clk; + } + } + + rc = __init_bus(device); + if (rc) { + d_vpr_e("Failed to init bus: %d\n", rc); + goto err_init_bus; + } + + rc = __init_subcaches(device); + if (rc) + d_vpr_e("Failed to init subcaches: %d\n", rc); + + return rc; + +err_init_reset_clk: +err_init_bus: + __deinit_clocks(device); +err_init_clocks: + __deinit_regulators(device); + return rc; +} + +static void __deinit_resources(struct venus_hfi_device *device) +{ + __deinit_subcaches(device); + __deinit_bus(device); + __deinit_clocks(device); + __deinit_regulators(device); +} + +static int __protect_cp_mem(struct venus_hfi_device *device) +{ + struct tzbsp_memprot memprot; + int rc = 0; + struct context_bank_info *cb; + + if (!device) + return -EINVAL; + + memprot.cp_start = 0x0; + memprot.cp_size = 0x0; + memprot.cp_nonpixel_start = 0x0; + memprot.cp_nonpixel_size = 0x0; + + mutex_lock(&device->res->cb_lock); + list_for_each_entry(cb, &device->res->context_banks, list) { + if (!strcmp(cb->name, "venus_ns")) { + memprot.cp_size = cb->addr_range.start; + + d_vpr_h("%s: memprot.cp_size: %#x\n", + __func__, memprot.cp_size); + } + + if 
(!strcmp(cb->name, "venus_sec_non_pixel")) { + memprot.cp_nonpixel_start = cb->addr_range.start; + memprot.cp_nonpixel_size = cb->addr_range.size; + + d_vpr_h("%s: cp_nonpixel_start: %#x size: %#x\n", + __func__, memprot.cp_nonpixel_start, + memprot.cp_nonpixel_size); + } + } + mutex_unlock(&device->res->cb_lock); + + rc = qcom_scm_mem_protect_video(memprot.cp_start, memprot.cp_size, + memprot.cp_nonpixel_start, memprot.cp_nonpixel_size); + + if (rc) + d_vpr_e("Failed to protect memory(%d)\n", rc); + + trace_venus_hfi_var_done( + memprot.cp_start, memprot.cp_size, + memprot.cp_nonpixel_start, memprot.cp_nonpixel_size); + return rc; +} + +static int __disable_regulator(struct regulator_info *rinfo, + struct venus_hfi_device *device) +{ + int rc = 0; + + d_vpr_h("Disabling regulator %s\n", rinfo->name); + + /* + * This call is needed. Driver needs to acquire the control back + * from HW in order to disable the regualtor. Else the behavior + * is unknown. + */ + + rc = __acquire_regulator(rinfo, device, DEFAULT_SID); + if (rc) { + /* + * This is somewhat fatal, but nothing we can do + * about it. 
We can't disable the regulator w/o + * getting it back under s/w control + */ + d_vpr_e("Failed to acquire control on %s\n", + rinfo->name); + + goto disable_regulator_failed; + } + + if (!regulator_is_enabled(rinfo->regulator)) + d_vpr_e("%s: regulator %s already disabled\n", + __func__, rinfo->name); + + rc = regulator_disable(rinfo->regulator); + if (rc) { + d_vpr_e("Failed to disable %s: %d\n", + rinfo->name, rc); + goto disable_regulator_failed; + } + + if (regulator_is_enabled(rinfo->regulator)) + d_vpr_e("%s: regulator %s not disabled\n", + __func__, rinfo->name); + + return 0; +disable_regulator_failed: + + /* Bring attention to this issue */ + msm_vidc_res_handle_fatal_hw_error(device->res, true); + return rc; +} + +static int __enable_hw_power_collapse(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + + rc = __hand_off_regulators(device, sid); + if (rc) + s_vpr_e(sid, "%s: Failed to enable HW power collapse %d\n", + __func__, rc); + return rc; +} + +static int __enable_regulators(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0, c = 0; + struct regulator_info *rinfo; + + s_vpr_h(sid, "Enabling regulators\n"); + + venus_hfi_for_each_regulator(device, rinfo) { + if (regulator_is_enabled(rinfo->regulator)) + s_vpr_e(sid, "%s: regulator %s already enabled\n", + __func__, rinfo->name); + + rc = regulator_enable(rinfo->regulator); + if (rc) { + s_vpr_e(sid, "Failed to enable %s: %d\n", + rinfo->name, rc); + goto err_reg_enable_failed; + } + + if (!regulator_is_enabled(rinfo->regulator)) + s_vpr_e(sid, "%s: regulator %s not enabled\n", + __func__, rinfo->name); + + s_vpr_h(sid, "Enabled regulator %s\n", + rinfo->name); + c++; + } + + return 0; + +err_reg_enable_failed: + venus_hfi_for_each_regulator_reverse_continue(device, rinfo, c) + __disable_regulator(rinfo, device); + + return rc; +} + +int __disable_regulators(struct venus_hfi_device *device) +{ + struct regulator_info *rinfo; + + d_vpr_h("Disabling regulators\n"); + 
venus_hfi_for_each_regulator_reverse(device, rinfo) + __disable_regulator(rinfo, device); + + return 0; +} + +static int __enable_subcaches(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + u32 c = 0; + struct subcache_info *sinfo; + + if (msm_vidc_syscache_disable || !is_sys_cache_present(device)) + return 0; + + /* Activate subcaches */ + venus_hfi_for_each_subcache(device, sinfo) { + rc = llcc_slice_activate(sinfo->subcache); + if (rc) { + s_vpr_e(sid, "Failed to activate %s: %d\n", + sinfo->name, rc); + msm_vidc_res_handle_fatal_hw_error(device->res, true); + goto err_activate_fail; + } + sinfo->isactive = true; + s_vpr_h(sid, "Activated subcache %s\n", sinfo->name); + c++; + } + + s_vpr_h(sid, "Activated %d Subcaches to Venus\n", c); + + return 0; + +err_activate_fail: + __release_subcaches(device, sid); + __disable_subcaches(device, sid); + return 0; +} + +static int __set_subcaches(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + u32 c = 0; + struct subcache_info *sinfo; + u32 resource[VIDC_MAX_SUBCACHE_SIZE]; + struct hfi_resource_syscache_info_type *sc_res_info; + struct hfi_resource_subcache_type *sc_res; + struct vidc_resource_hdr rhdr; + + if (device->res->sys_cache_res_set) { + s_vpr_h(sid, "Subcaches already set to Venus\n"); + return 0; + } + + memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE)); + + sc_res_info = (struct hfi_resource_syscache_info_type *)resource; + sc_res = &(sc_res_info->rg_subcache_entries[0]); + + venus_hfi_for_each_subcache(device, sinfo) { + if (sinfo->isactive) { + sc_res[c].size = sinfo->subcache->slice_size; + sc_res[c].sc_id = sinfo->subcache->slice_id; + c++; + } + } + + /* Set resource to Venus for activated subcaches */ + if (c) { + s_vpr_h(sid, "Setting %d Subcaches\n", c); + + rhdr.resource_handle = sc_res_info; /* cookie */ + rhdr.resource_id = VIDC_RESOURCE_SYSCACHE; + + sc_res_info->num_entries = c; + + rc = __core_set_resource(device, &rhdr, (void *)sc_res_info); + if (rc) 
{ + s_vpr_e(sid, "Failed to set subcaches %d\n", rc); + goto err_fail_set_subacaches; + } + + venus_hfi_for_each_subcache(device, sinfo) { + if (sinfo->isactive) + sinfo->isset = true; + } + + s_vpr_h(sid, "Set Subcaches done to Venus\n"); + device->res->sys_cache_res_set = true; + } + + return 0; + +err_fail_set_subacaches: + __disable_subcaches(device, sid); + + return 0; +} + +static int __release_subcaches(struct venus_hfi_device *device, u32 sid) +{ + struct subcache_info *sinfo; + int rc = 0; + u32 c = 0; + u32 resource[VIDC_MAX_SUBCACHE_SIZE]; + struct hfi_resource_syscache_info_type *sc_res_info; + struct hfi_resource_subcache_type *sc_res; + struct vidc_resource_hdr rhdr; + + if (msm_vidc_syscache_disable || !is_sys_cache_present(device)) + return 0; + + memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE)); + + sc_res_info = (struct hfi_resource_syscache_info_type *)resource; + sc_res = &(sc_res_info->rg_subcache_entries[0]); + + /* Release resource command to Venus */ + venus_hfi_for_each_subcache_reverse(device, sinfo) { + if (sinfo->isset) { + /* Update the entry */ + sc_res[c].size = sinfo->subcache->slice_size; + sc_res[c].sc_id = sinfo->subcache->slice_id; + c++; + sinfo->isset = false; + } + } + + if (c > 0) { + s_vpr_h(sid, "Releasing %d subcaches\n", c); + rhdr.resource_handle = sc_res_info; /* cookie */ + rhdr.resource_id = VIDC_RESOURCE_SYSCACHE; + + rc = __core_release_resource(device, &rhdr); + if (rc) + s_vpr_e(sid, "Failed to release %d subcaches\n", c); + } + + device->res->sys_cache_res_set = false; + + return 0; +} + +static int __disable_subcaches(struct venus_hfi_device *device, u32 sid) +{ + struct subcache_info *sinfo; + int rc = 0; + + if (msm_vidc_syscache_disable || !is_sys_cache_present(device)) + return 0; + + /* De-activate subcaches */ + venus_hfi_for_each_subcache_reverse(device, sinfo) { + if (sinfo->isactive) { + s_vpr_h(sid, "De-activate subcache %s\n", + sinfo->name); + rc = 
llcc_slice_deactivate(sinfo->subcache); + if (rc) { + s_vpr_e(sid, "Failed to de-activate %s: %d\n", + sinfo->name, rc); + } + sinfo->isactive = false; + } + } + + return 0; +} + +static int __set_ubwc_config(struct venus_hfi_device *device) +{ + u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; + int rc = 0; + + struct hfi_cmd_sys_set_property_packet *pkt = + (struct hfi_cmd_sys_set_property_packet *) &packet; + + if (!device->res->ubwc_config) + return 0; + + rc = call_hfi_pkt_op(device, sys_ubwc_config, pkt, + device->res->ubwc_config); + if (rc) { + d_vpr_e("ubwc config setting to FW failed\n"); + rc = -ENOTEMPTY; + goto fail_to_set_ubwc_config; + } + + if (__iface_cmdq_write(device, pkt, DEFAULT_SID)) { + rc = -ENOTEMPTY; + goto fail_to_set_ubwc_config; + } + + d_vpr_h("Configured UBWC Config to Venus\n"); + +fail_to_set_ubwc_config: + return rc; +} + +static int __venus_power_on(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + + if (device->power_enabled) + return 0; + + device->power_enabled = true; + /* Vote for all hardware resources */ + rc = __vote_buses(device, INT_MAX, INT_MAX, sid); + if (rc) { + s_vpr_e(sid, "Failed to vote buses, err: %d\n", rc); + goto fail_vote_buses; + } + + rc = __enable_regulators(device, sid); + if (rc) { + s_vpr_e(sid, "Failed to enable GDSC, err = %d\n", rc); + goto fail_enable_gdsc; + } + + rc = call_venus_op(device, reset_ahb2axi_bridge, device, sid); + if (rc) { + s_vpr_e(sid, "Failed to reset ahb2axi: %d\n", rc); + goto fail_enable_clks; + } + + rc = __prepare_enable_clks(device, sid); + if (rc) { + s_vpr_e(sid, "Failed to enable clocks: %d\n", rc); + goto fail_enable_clks; + } + + rc = __scale_clocks(device, sid); + if (rc) { + s_vpr_e(sid, + "Failed to scale clocks, performance might be affected\n"); + rc = 0; + } + + /* + * Re-program all of the registers that get reset as a result of + * regulator_disable() and _enable() + */ + __set_registers(device, sid); + + call_venus_op(device, interrupt_init, device, sid); 
+ device->intr_status = 0; + enable_irq(device->hal_data->irq); + + return rc; + +fail_enable_clks: + __disable_regulators(device); +fail_enable_gdsc: + __unvote_buses(device, sid); +fail_vote_buses: + device->power_enabled = false; + return rc; +} + +static inline int __suspend(struct venus_hfi_device *device) +{ + int rc = 0; + + if (!device) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } else if (!device->power_enabled) { + d_vpr_h("Power already disabled\n"); + return 0; + } + + d_vpr_h("Entering suspend\n"); + + rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND, DEFAULT_SID); + if (rc) { + d_vpr_e("Failed to suspend video core %d\n", rc); + goto err_tzbsp_suspend; + } + + __disable_subcaches(device, DEFAULT_SID); + + call_venus_op(device, power_off, device); + d_vpr_h("Venus power off\n"); + return rc; + +err_tzbsp_suspend: + return rc; +} + +static inline int __resume(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + + if (!device) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return -EINVAL; + } else if (device->power_enabled) { + goto exit; + } else if (!__core_in_valid_state(device)) { + s_vpr_e(sid, "venus_hfi_device in deinit state."); + return -EINVAL; + } + + s_vpr_h(sid, "Resuming from power collapse\n"); + rc = __venus_power_on(device, sid); + if (rc) { + s_vpr_e(sid, "Failed to power on venus\n"); + goto err_venus_power_on; + } + + /* Reboot the firmware */ + rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME, sid); + if (rc) { + s_vpr_e(sid, "Failed to resume video core %d\n", rc); + goto err_set_video_state; + } + + /* + * Hand off control of regulators to h/w _after_ loading fw. + * Note that the GDSC will turn off when switching from normal + * (s/w triggered) to fast (HW triggered) unless the h/w vote is + * present. 
+ */ + if (__enable_hw_power_collapse(device, sid)) + s_vpr_e(sid, "Failed to enabled inter-frame PC\n"); + + call_venus_op(device, setup_ucregion_memmap, device, sid); + + /* Wait for boot completion */ + rc = call_venus_op(device, boot_firmware, device, sid); + if (rc) { + s_vpr_e(sid, "Failed to reset venus core\n"); + goto err_reset_core; + } + + __sys_set_debug(device, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT, + sid); + + __enable_subcaches(device, sid); + __set_subcaches(device, sid); + + s_vpr_h(sid, "Resumed from power collapse\n"); +exit: + /* Don't reset skip_pc_count for SYS_PC_PREP cmd */ + if (device->last_packet_type != HFI_CMD_SYS_PC_PREP) + device->skip_pc_count = 0; + return rc; +err_reset_core: + __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND, sid); +err_set_video_state: + call_venus_op(device, power_off, device); +err_venus_power_on: + s_vpr_e(sid, "Failed to resume from power collapse\n"); + return rc; +} + +static int __load_fw(struct venus_hfi_device *device) +{ + int rc = 0; + + /* Initialize resources */ + rc = __init_resources(device, device->res); + if (rc) { + d_vpr_e("Failed to init resources: %d\n", rc); + goto fail_init_res; + } + + rc = __initialize_packetization(device); + if (rc) { + d_vpr_e("Failed to initialize packetization\n"); + goto fail_init_pkt; + } + trace_msm_v4l2_vidc_fw_load_start("msm_v4l2_vidc venus_fw load start"); + + rc = __venus_power_on(device, DEFAULT_SID); + if (rc) { + d_vpr_e("Failed to power on venus in in load_fw\n"); + goto fail_venus_power_on; + } + + if (!device->res->firmware_base) { + if (!device->resources.fw.cookie) + device->resources.fw.cookie = + subsystem_get_with_fwname("venus", + device->res->fw_name); + + if (IS_ERR_OR_NULL(device->resources.fw.cookie)) { + d_vpr_e("Failed to download firmware\n"); + device->resources.fw.cookie = NULL; + rc = -ENOMEM; + goto fail_load_fw; + } + } else { + d_vpr_e("Firmware base must be 0\n"); + } + + if (!device->res->firmware_base) { + rc = 
__protect_cp_mem(device); + if (rc) { + d_vpr_e("Failed to protect memory\n"); + goto fail_protect_mem; + } + } + /* + * Hand off control of regulators to h/w _after_ loading fw. + * Note that the GDSC will turn off when switching from normal + * (s/w triggered) to fast (HW triggered) unless the h/w vote is + * present. + */ + if (__enable_hw_power_collapse(device, DEFAULT_SID)) + d_vpr_e("Failed to enabled inter-frame PC\n"); + + trace_msm_v4l2_vidc_fw_load_end("msm_v4l2_vidc venus_fw load end"); + return rc; +fail_protect_mem: + if (device->resources.fw.cookie) + subsystem_put(device->resources.fw.cookie); + device->resources.fw.cookie = NULL; +fail_load_fw: + call_venus_op(device, power_off, device); +fail_venus_power_on: +fail_init_pkt: + __deinit_resources(device); +fail_init_res: + trace_msm_v4l2_vidc_fw_load_end("msm_v4l2_vidc venus_fw load end"); + return rc; +} + +static void __unload_fw(struct venus_hfi_device *device) +{ + if (!device->resources.fw.cookie) + return; + + cancel_delayed_work(&venus_hfi_pm_work); + if (device->state != VENUS_STATE_DEINIT) + flush_workqueue(device->venus_pm_workq); + + subsystem_put(device->resources.fw.cookie); + __interface_queues_release(device); + call_venus_op(device, power_off, device); + device->resources.fw.cookie = NULL; + __deinit_resources(device); + + d_vpr_h("Firmware unloaded successfully\n"); +} + +static int venus_hfi_get_fw_info(void *dev, struct hal_fw_info *fw_info) +{ + int i = 0, j = 0; + struct venus_hfi_device *device = dev; + size_t smem_block_size = 0; + u8 *smem_table_ptr; + char version[VENUS_VERSION_LENGTH] = ""; + const u32 smem_image_index_venus = 14 * 128; + + if (!device || !fw_info) { + d_vpr_e("%s: Invalid parameter: device = %pK fw_info = %pK\n", + __func__, device, fw_info); + return -EINVAL; + } + + mutex_lock(&device->lock); + + smem_table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, + SMEM_IMAGE_VERSION_TABLE, &smem_block_size); + if (smem_table_ptr && + ((smem_image_index_venus + + 
VENUS_VERSION_LENGTH) <= smem_block_size)) + memcpy(version, + smem_table_ptr + smem_image_index_venus, + VENUS_VERSION_LENGTH); + + while (version[i] != 'V' && version[i] != 'v' && + ++i < VENUS_VERSION_LENGTH) + ; + + if (i >= VENUS_VERSION_LENGTH - 1) { + d_vpr_e("Venus version string is not proper\n"); + fw_info->version[0] = '\0'; + goto fail_version_string; + } + + for (; i < VENUS_VERSION_LENGTH && j < VENUS_VERSION_LENGTH - 1; i++) + fw_info->version[j++] = version[i]; + fw_info->version[j] = '\0'; + +fail_version_string: + d_vpr_h("F/W version retrieved : %s\n", fw_info->version); + fw_info->base_addr = device->hal_data->firmware_base; + fw_info->register_base = device->res->register_base; + fw_info->register_size = device->hal_data->register_size; + fw_info->irq = device->hal_data->irq; + + mutex_unlock(&device->lock); + return 0; +} + +static int venus_hfi_get_core_capabilities(void *dev) +{ + struct venus_hfi_device *device = dev; + int rc = 0; + + if (!device) + return -EINVAL; + + mutex_lock(&device->lock); + + rc = HAL_VIDEO_ENCODER_ROTATION_CAPABILITY | + HAL_VIDEO_ENCODER_SCALING_CAPABILITY | + HAL_VIDEO_ENCODER_DEINTERLACE_CAPABILITY | + HAL_VIDEO_DECODER_MULTI_STREAM_CAPABILITY; + + mutex_unlock(&device->lock); + + return rc; +} + +static void __noc_error_info_common(struct venus_hfi_device *device) +{ + u32 val = 0; + u32 sid = DEFAULT_SID; + + if (!device) { + d_vpr_e("%s: null device\n", __func__); + return; + } + + if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS, + DEFAULT_SID)) { + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_SWID_LOW_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_SWID_LOW: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_SWID_HIGH_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_SWID_HIGH: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + 
VCODEC_CORE0_VIDEO_NOC_ERR_MAINCTL_LOW_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_MAINCTL_LOW: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG0_LOW: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG0_HIGH_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG0_HIGH: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG1_LOW_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG1_LOW: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG1_HIGH_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG1_HIGH: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG2_LOW_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG2_LOW: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG2_HIGH_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG2_HIGH: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG3_LOW: %#x\n", val); + val = __read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS + + VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS, sid); + d_vpr_e("VCODEC_NOC_ERR_ERRLOG3_HIGH: %#x\n", val); + } +} + +static int venus_hfi_noc_error_info(void *dev) +{ + struct venus_hfi_device *device; + + if (!dev) { + d_vpr_e("%s: null device\n", __func__); + return -EINVAL; + } + device = dev; + + mutex_lock(&device->lock); + d_vpr_e("%s: non error information\n", __func__); + + call_venus_op(device, noc_error_info, device); + + mutex_unlock(&device->lock); + + return 0; +} + +static int __initialize_packetization(struct venus_hfi_device *device) +{ + int rc = 0; + + if (!device || !device->res) { + 
d_vpr_e("%s: invalid params %pK\n", __func__, device); + return -EINVAL; + } + + device->packetization_type = HFI_PACKETIZATION_4XX; + + device->pkt_ops = hfi_get_pkt_ops_handle(device->packetization_type); + if (!device->pkt_ops) { + rc = -EINVAL; + d_vpr_e("Failed to get pkt_ops handle\n"); + } + + return rc; +} + +void __init_venus_ops(struct venus_hfi_device *device) +{ + if (device->res->vpu_ver == VPU_VERSION_AR50_LITE) + device->vpu_ops = &ar50_lite_ops; + else + device->vpu_ops = &iris2_ops; +} + +static struct venus_hfi_device *__add_device(u32 device_id, + struct msm_vidc_platform_resources *res, + hfi_cmd_response_callback callback) +{ + struct venus_hfi_device *hdevice = &venus_hfi_dev; + int rc = 0; + + if (!res || !callback) { + d_vpr_e("%s: Invalid Parameters %pK %pK\n", + __func__, res, callback); + return NULL; + } + + d_vpr_h("%s: entered, device_id: %d\n", __func__, device_id); + + hdevice->response_pkt = kmalloc_array(max_packets, + sizeof(*hdevice->response_pkt), GFP_KERNEL); + if (!hdevice->response_pkt) { + d_vpr_e("failed to allocate response_pkt\n"); + goto err_cleanup; + } + + hdevice->raw_packet = + kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL); + if (!hdevice->raw_packet) { + d_vpr_e("failed to allocate raw packet\n"); + goto err_cleanup; + } + + rc = __init_regs_and_interrupts(hdevice, res); + if (rc) + goto err_cleanup; + + hdevice->res = res; + hdevice->device_id = device_id; + hdevice->callback = (msm_vidc_callback) callback; + + __init_venus_ops(hdevice); + + hdevice->vidc_workq = create_singlethread_workqueue( + "msm_vidc_workerq_venus"); + if (!hdevice->vidc_workq) { + d_vpr_e("%s: create vidc workq failed\n", __func__); + goto err_cleanup; + } + + hdevice->venus_pm_workq = create_singlethread_workqueue( + "pm_workerq_venus"); + if (!hdevice->venus_pm_workq) { + d_vpr_e("%s: create pm workq failed\n", __func__); + goto err_cleanup; + } + + if (!hal_ctxt.dev_count) + INIT_LIST_HEAD(&hal_ctxt.dev_head); + + 
mutex_init(&hdevice->lock); + INIT_LIST_HEAD(&hdevice->list); + INIT_LIST_HEAD(&hdevice->sess_head); + list_add_tail(&hdevice->list, &hal_ctxt.dev_head); + hal_ctxt.dev_count++; + + return hdevice; + +err_cleanup: + if (hdevice->vidc_workq) + destroy_workqueue(hdevice->vidc_workq); + kfree(hdevice->response_pkt); + kfree(hdevice->raw_packet); + return NULL; +} + +static struct venus_hfi_device *__get_device(u32 device_id, + struct msm_vidc_platform_resources *res, + hfi_cmd_response_callback callback) +{ + if (!res || !callback) { + d_vpr_e("%s: invalid params: %pK %pK\n", + __func__, res, callback); + return NULL; + } + + return __add_device(device_id, res, callback); +} + +void venus_hfi_delete_device(void *device) +{ + struct venus_hfi_device *close, *tmp, *dev; + + if (!device) + return; + + dev = (struct venus_hfi_device *) device; + + list_for_each_entry_safe(close, tmp, &hal_ctxt.dev_head, list) { + if (close->hal_data->irq == dev->hal_data->irq) { + hal_ctxt.dev_count--; + list_del(&close->list); + mutex_destroy(&close->lock); + destroy_workqueue(close->vidc_workq); + destroy_workqueue(close->venus_pm_workq); + free_irq(dev->hal_data->irq, close); + iounmap(dev->hal_data->register_base); + kfree(close->hal_data); + kfree(close->response_pkt); + kfree(close->raw_packet); + break; + } + } +} + +static void venus_init_hfi_callbacks(struct hfi_device *hdev) +{ + hdev->core_init = venus_hfi_core_init; + hdev->core_ping = venus_hfi_core_ping; + hdev->core_release = venus_hfi_core_release; + hdev->core_trigger_ssr = venus_hfi_core_trigger_ssr; + hdev->session_init = venus_hfi_session_init; + hdev->session_end = venus_hfi_session_end; + hdev->session_abort = venus_hfi_session_abort; + hdev->session_clean = venus_hfi_session_clean; + hdev->session_set_buffers = venus_hfi_session_set_buffers; + hdev->session_release_buffers = venus_hfi_session_release_buffers; + hdev->session_load_res = venus_hfi_session_load_res; + hdev->session_release_res = 
venus_hfi_session_release_res; + hdev->session_start = venus_hfi_session_start; + hdev->session_continue = venus_hfi_session_continue; + hdev->session_stop = venus_hfi_session_stop; + hdev->session_etb = venus_hfi_session_etb; + hdev->session_ftb = venus_hfi_session_ftb; + hdev->session_process_batch = venus_hfi_session_process_batch; + hdev->session_get_buf_req = venus_hfi_session_get_buf_req; + hdev->session_flush = venus_hfi_session_flush; + hdev->session_set_property = venus_hfi_session_set_property; + hdev->session_pause = venus_hfi_session_pause; + hdev->session_resume = venus_hfi_session_resume; + hdev->scale_clocks = venus_hfi_scale_clocks; + hdev->vote_bus = venus_hfi_vote_buses; + hdev->get_fw_info = venus_hfi_get_fw_info; + hdev->get_core_capabilities = venus_hfi_get_core_capabilities; + hdev->suspend = venus_hfi_suspend; + hdev->flush_debug_queue = venus_hfi_flush_debug_queue; + hdev->noc_error_info = venus_hfi_noc_error_info; +} + +int venus_hfi_initialize(struct hfi_device *hdev, u32 device_id, + struct msm_vidc_platform_resources *res, + hfi_cmd_response_callback callback) +{ + int rc = 0; + + if (!hdev || !res || !callback) { + d_vpr_e("%s: invalid params: %pK %pK %pK\n", + __func__, hdev, res, callback); + rc = -EINVAL; + goto err_venus_hfi_init; + } + + hdev->hfi_device_data = __get_device(device_id, res, callback); + + if (IS_ERR_OR_NULL(hdev->hfi_device_data)) { + rc = PTR_ERR(hdev->hfi_device_data) ? + PTR_ERR(hdev->hfi_device_data) : -EINVAL; + goto err_venus_hfi_init; + } + + venus_init_hfi_callbacks(hdev); + +err_venus_hfi_init: + return rc; +} diff --git a/techpack/video/msm/vidc/hfi_common.h b/techpack/video/msm/vidc/hfi_common.h new file mode 100644 index 000000000000..216e3c25a5ad --- /dev/null +++ b/techpack/video/msm/vidc/hfi_common.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __HFI_COMMON_H__ +#define __HFI_COMMON_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "vidc_hfi_api.h" +#include "vidc_hfi_helper.h" +#include "vidc_hfi_api.h" +#include "vidc_hfi.h" +#include "msm_vidc_resources.h" +#include "msm_vidc_bus.h" +#include "hfi_packetization.h" +#include "hfi_io_common.h" + +#define HFI_MASK_QHDR_TX_TYPE 0xFF000000 +#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000 +#define HFI_MASK_QHDR_PRI_TYPE 0x0000FF00 +#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000FF +#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q 0x00 +#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q 0x01 +#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q 0x02 +#define HFI_MASK_QHDR_STATUS 0x000000FF + +#define VIDC_MAX_UNCOMPRESSED_FMT_PLANES 3 + +#define VIDC_IFACEQ_NUMQ 3 +#define VIDC_IFACEQ_CMDQ_IDX 0 +#define VIDC_IFACEQ_MSGQ_IDX 1 +#define VIDC_IFACEQ_DBGQ_IDX 2 +#define VIDC_IFACEQ_MAX_BUF_COUNT 50 +#define VIDC_IFACE_MAX_PARALLEL_CLNTS 16 +#define VIDC_IFACEQ_DFLT_QHDR 0x01010000 + +#define VIDC_MAX_NAME_LENGTH 64 +#define VIDC_MAX_PC_SKIP_COUNT 10 +#define VIDC_MAX_SUBCACHES 4 +#define VIDC_MAX_SUBCACHE_SIZE 52 + +struct hfi_queue_table_header { + u32 qtbl_version; + u32 qtbl_size; + u32 qtbl_qhdr0_offset; + u32 qtbl_qhdr_size; + u32 qtbl_num_q; + u32 qtbl_num_active_q; + void *device_addr; + char name[256]; +}; + +struct hfi_queue_header { + u32 qhdr_status; + u32 qhdr_start_addr; + u32 qhdr_type; + u32 qhdr_q_size; + u32 qhdr_pkt_size; + u32 qhdr_pkt_drop_cnt; + u32 qhdr_rx_wm; + u32 qhdr_tx_wm; + u32 qhdr_rx_req; + u32 qhdr_tx_req; + u32 qhdr_rx_irq_status; + u32 qhdr_tx_irq_status; + u32 qhdr_read_idx; + u32 qhdr_write_idx; +}; + +struct hfi_mem_map_table { + u32 mem_map_num_entries; + u32 mem_map_table_base_addr; +}; + +struct hfi_mem_map { + u32 virtual_addr; + u32 physical_addr; + u32 size; + u32 attr; +}; + +#define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \ + + sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ) + +#define 
VIDC_IFACEQ_QUEUE_SIZE (VIDC_IFACEQ_MAX_PKT_SIZE * \ + VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS) + +#define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i) \ + (void *)((ptr + sizeof(struct hfi_queue_table_header)) + \ + (i * sizeof(struct hfi_queue_header))) + +#define QDSS_SIZE 4096 +#define SFR_SIZE 4096 + +#define QUEUE_SIZE (VIDC_IFACEQ_TABLE_SIZE + \ + (VIDC_IFACEQ_QUEUE_SIZE * VIDC_IFACEQ_NUMQ)) + +#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K) +#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K) +#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K) +#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \ + ALIGNED_QDSS_SIZE, SZ_1M) + +enum vidc_hw_reg { + VIDC_HWREG_CTRL_STATUS = 0x1, + VIDC_HWREG_QTBL_INFO = 0x2, + VIDC_HWREG_QTBL_ADDR = 0x3, + VIDC_HWREG_CTRLR_RESET = 0x4, + VIDC_HWREG_IFACEQ_FWRXREQ = 0x5, + VIDC_HWREG_IFACEQ_FWTXREQ = 0x6, + VIDC_HWREG_VHI_SOFTINTEN = 0x7, + VIDC_HWREG_VHI_SOFTINTSTATUS = 0x8, + VIDC_HWREG_VHI_SOFTINTCLR = 0x9, + VIDC_HWREG_HVI_SOFTINTEN = 0xA, +}; + +struct vidc_mem_addr { + u32 align_device_addr; + u8 *align_virtual_addr; + u32 mem_size; + struct msm_smem mem_data; +}; + +struct vidc_iface_q_info { + void *q_hdr; + struct vidc_mem_addr q_array; +}; + +/* + * These are helper macros to iterate over various lists within + * venus_hfi_device->res. The intention is to cut down on a lot of boiler-plate + * code + */ + +/* Read as "for each 'thing' in a set of 'thingies'" */ +#define venus_hfi_for_each_thing(__device, __thing, __thingy) \ + venus_hfi_for_each_thing_continue(__device, __thing, __thingy, 0) + +#define venus_hfi_for_each_thing_reverse(__device, __thing, __thingy) \ + venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \ + (__device)->res->__thingy##_set.count - 1) + +/* TODO: the __from parameter technically not required since we can figure it + * out with some pointer magic (i.e. __thing - __thing##_tbl[0]). 
If this macro + * sees extensive use, probably worth cleaning it up but for now omitting it + * since it introduces unnecessary complexity. + */ +#define venus_hfi_for_each_thing_continue(__device, __thing, __thingy, __from) \ + for (__thing = &(__device)->res->\ + __thingy##_set.__thingy##_tbl[__from]; \ + __thing < &(__device)->res->__thingy##_set.__thingy##_tbl[0] + \ + ((__device)->res->__thingy##_set.count - __from); \ + ++__thing) + +#define venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \ + __from) \ + for (__thing = &(__device)->res->\ + __thingy##_set.__thingy##_tbl[__from]; \ + __thing >= &(__device)->res->__thingy##_set.__thingy##_tbl[0]; \ + --__thing) + +/* Regular set helpers */ +#define venus_hfi_for_each_regulator(__device, __rinfo) \ + venus_hfi_for_each_thing(__device, __rinfo, regulator) + +#define venus_hfi_for_each_regulator_reverse(__device, __rinfo) \ + venus_hfi_for_each_thing_reverse(__device, __rinfo, regulator) + +#define venus_hfi_for_each_regulator_reverse_continue(__device, __rinfo, \ + __from) \ + venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \ + regulator, __from) + +/* Clock set helpers */ +#define venus_hfi_for_each_clock(__device, __cinfo) \ + venus_hfi_for_each_thing(__device, __cinfo, clock) + +#define venus_hfi_for_each_clock_reverse(__device, __cinfo) \ + venus_hfi_for_each_thing_reverse(__device, __cinfo, clock) + +#define venus_hfi_for_each_clock_reverse_continue(__device, __rinfo, \ + __from) \ + venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \ + clock, __from) + +/* Bus set helpers */ +#define venus_hfi_for_each_bus(__device, __binfo) \ + venus_hfi_for_each_thing(__device, __binfo, bus) +#define venus_hfi_for_each_bus_reverse(__device, __binfo) \ + venus_hfi_for_each_thing_reverse(__device, __binfo, bus) + +/* Subcache set helpers */ +#define venus_hfi_for_each_subcache(__device, __sinfo) \ + venus_hfi_for_each_thing(__device, __sinfo, subcache) +#define 
venus_hfi_for_each_subcache_reverse(__device, __sinfo) \ + venus_hfi_for_each_thing_reverse(__device, __sinfo, subcache) + +#define call_venus_op(d, op, ...) \ + (((d) && (d)->vpu_ops && (d)->vpu_ops->op) ? \ + ((d)->vpu_ops->op(__VA_ARGS__)):0) + +/* Internal data used in vidc_hal not exposed to msm_vidc*/ +struct hal_data { + u32 irq; + phys_addr_t firmware_base; + u8 __iomem *register_base; + u32 register_size; +}; + +struct venus_resources { + struct msm_vidc_fw fw; +}; + +enum venus_hfi_state { + VENUS_STATE_DEINIT = 1, + VENUS_STATE_INIT, +}; + +enum reset_state { + INIT = 1, + ASSERT, + DEASSERT, +}; + +struct venus_hfi_device; + +struct venus_hfi_vpu_ops { + void (*interrupt_init)(struct venus_hfi_device *device, u32 sid); + void (*setup_ucregion_memmap)(struct venus_hfi_device *device, u32 sid); + void (*clock_config_on_enable)(struct venus_hfi_device *device, + u32 sid); + int (*reset_ahb2axi_bridge)(struct venus_hfi_device *device, u32 sid); + void (*power_off)(struct venus_hfi_device *device); + int (*prepare_pc)(struct venus_hfi_device *device); + void (*raise_interrupt)(struct venus_hfi_device *device, u32 sid); + bool (*watchdog)(u32 intr_status); + void (*noc_error_info)(struct venus_hfi_device *device); + void (*core_clear_interrupt)(struct venus_hfi_device *device); + int (*boot_firmware)(struct venus_hfi_device *device, u32 sid); +}; + +struct venus_hfi_device { + struct list_head list; + struct list_head sess_head; + u32 intr_status; + u32 device_id; + u32 clk_freq; + u32 last_packet_type; + struct msm_vidc_bus_data bus_vote; + bool power_enabled; + struct mutex lock; + msm_vidc_callback callback; + struct vidc_mem_addr iface_q_table; + struct vidc_mem_addr dsp_iface_q_table; + struct vidc_mem_addr qdss; + struct vidc_mem_addr sfr; + struct vidc_mem_addr mem_addr; + struct vidc_iface_q_info iface_queues[VIDC_IFACEQ_NUMQ]; + struct vidc_iface_q_info dsp_iface_queues[VIDC_IFACEQ_NUMQ]; + u32 dsp_flags; + struct hal_data *hal_data; + struct 
workqueue_struct *vidc_workq; + struct workqueue_struct *venus_pm_workq; + int spur_count; + int reg_count; + struct venus_resources resources; + struct msm_vidc_platform_resources *res; + enum venus_hfi_state state; + struct hfi_packetization_ops *pkt_ops; + enum hfi_packetization_type packetization_type; + struct msm_vidc_cb_info *response_pkt; + u8 *raw_packet; + unsigned int skip_pc_count; + struct venus_hfi_vpu_ops *vpu_ops; +}; + +void venus_hfi_delete_device(void *device); + +int venus_hfi_initialize(struct hfi_device *hdev, u32 device_id, + struct msm_vidc_platform_resources *res, + hfi_cmd_response_callback callback); + +struct lut const *__lut(int width, int height, int fps); +fp_t __compression_ratio(struct lut const *entry, int bpp); +void __dump(struct dump dump[], int len, u32 sid); + +void __write_register(struct venus_hfi_device *device, + u32 reg, u32 value, u32 sid); +void __write_register_masked(struct venus_hfi_device *device, + u32 reg, u32 value, u32 mask, u32 sid); +int __read_register(struct venus_hfi_device *device, u32 reg, u32 sid); +void __disable_unprepare_clks(struct venus_hfi_device *device); +int __disable_regulators(struct venus_hfi_device *device); +int __unvote_buses(struct venus_hfi_device *device, u32 sid); +int __reset_ahb2axi_bridge_common(struct venus_hfi_device *device, u32 sid); +int __prepare_pc(struct venus_hfi_device *device); + +/* IRIS2 specific */ +void __interrupt_init_iris2(struct venus_hfi_device *device, u32 sid); +void __setup_ucregion_memory_map_iris2(struct venus_hfi_device *device, + u32 sid); +void __power_off_iris2(struct venus_hfi_device *device); +int __prepare_pc_iris2(struct venus_hfi_device *device); +void __raise_interrupt_iris2(struct venus_hfi_device *device, u32 sid); +bool __watchdog_iris2(u32 intr_status); +void __noc_error_info_iris2(struct venus_hfi_device *device); +void __core_clear_interrupt_iris2(struct venus_hfi_device *device); +int __boot_firmware_iris2(struct venus_hfi_device *device, 
u32 sid); + +/* AR50_LITE specific */ +void __interrupt_init_ar50_lt(struct venus_hfi_device *device, u32 sid); +void __setup_ucregion_memory_map_ar50_lt(struct venus_hfi_device *device, u32 sid); +void __power_off_ar50_lt(struct venus_hfi_device *device); +int __prepare_pc_ar50_lt(struct venus_hfi_device *device); +void __raise_interrupt_ar50_lt(struct venus_hfi_device *device, u32 sid); +void __core_clear_interrupt_ar50_lt(struct venus_hfi_device *device); +int __boot_firmware_ar50_lt(struct venus_hfi_device *device, u32 sid); + +#endif diff --git a/techpack/video/msm/vidc/hfi_io_common.h b/techpack/video/msm/vidc/hfi_io_common.h new file mode 100644 index 000000000000..d68555a9d10a --- /dev/null +++ b/techpack/video/msm/vidc/hfi_io_common.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __HFI_IO_COMMON_H__ +#define __HFI_IO_COMMON_H__ + +#include + +#define VBIF_BASE_OFFS 0x00080000 + +#define CPU_BASE_OFFS 0x000C0000 +#define CPU_CS_BASE_OFFS (CPU_BASE_OFFS + 0x00012000) +#define CPU_IC_BASE_OFFS (CPU_BASE_OFFS + 0x0001F000) + +#define CPU_CS_A2HSOFTINT (CPU_CS_BASE_OFFS + 0x18) +#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE_OFFS + 0x1C) +#define CPU_CS_VMIMSG (CPU_CS_BASE_OFFS + 0x34) +#define CPU_CS_VMIMSGAG0 (CPU_CS_BASE_OFFS + 0x38) +#define CPU_CS_VMIMSGAG1 (CPU_CS_BASE_OFFS + 0x3C) +#define CPU_CS_SCIACMD (CPU_CS_BASE_OFFS + 0x48) + +/* HFI_CTRL_STATUS */ +#define CPU_CS_SCIACMDARG0 (CPU_CS_BASE_OFFS + 0x4C) +#define CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK 0xfe +#define CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY 0x100 +#define CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK 0x40000000 + +/* HFI_QTBL_INFO */ +#define CPU_CS_SCIACMDARG1 (CPU_CS_BASE_OFFS + 0x50) + +/* HFI_QTBL_ADDR */ +#define CPU_CS_SCIACMDARG2 (CPU_CS_BASE_OFFS + 0x54) + +/* HFI_VERSION_INFO */ +#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE_OFFS + 0x58) + +/* SFR_ADDR */ +#define 
CPU_CS_SCIBCMD (CPU_CS_BASE_OFFS + 0x5C) + +/* MMAP_ADDR */ +#define CPU_CS_SCIBCMDARG0 (CPU_CS_BASE_OFFS + 0x60) + +/* UC_REGION_ADDR */ +#define CPU_CS_SCIBARG1 (CPU_CS_BASE_OFFS + 0x64) + +/* UC_REGION_ADDR */ +#define CPU_CS_SCIBARG2 (CPU_CS_BASE_OFFS + 0x68) + +#define CPU_IC_SOFTINT (CPU_IC_BASE_OFFS + 0x18) +#define CPU_IC_SOFTINT_H2A_SHFT 0xF + +/* + * -------------------------------------------------------------------------- + * MODULE: wrapper + * -------------------------------------------------------------------------- + */ +#define WRAPPER_BASE_OFFS 0x000E0000 +#define WRAPPER_INTR_STATUS (WRAPPER_BASE_OFFS + 0x0C) +#define WRAPPER_INTR_STATUS_A2HWD_BMSK 0x10 +#define WRAPPER_INTR_STATUS_A2H_BMSK 0x4 + +#define WRAPPER_INTR_MASK (WRAPPER_BASE_OFFS + 0x10) +#define WRAPPER_INTR_MASK_A2HWD_BMSK 0x10 +#define WRAPPER_INTR_MASK_A2HVCODEC_BMSK 0x8 +#define WRAPPER_INTR_MASK_A2HCPU_BMSK 0x4 +#define WRAPPER_INTR_CLEAR (WRAPPER_BASE_OFFS + 0x14) + +#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x2000) +#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE_OFFS + 0x2010) +#define WRAPPER_CPU_STATUS (WRAPPER_BASE_OFFS + 0x2014) + +#define CTRL_INIT CPU_CS_SCIACMD + +#define CTRL_STATUS CPU_CS_SCIACMDARG0 +#define CTRL_ERROR_STATUS__M \ + CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK +#define CTRL_INIT_IDLE_MSG_BMSK \ + CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK +#define CTRL_STATUS_PC_READY \ + CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY + + +#define QTBL_INFO CPU_CS_SCIACMDARG1 + +#define QTBL_ADDR CPU_CS_SCIACMDARG2 + +#define VERSION_INFO CPU_CS_SCIACMDARG3 + +#define SFR_ADDR CPU_CS_SCIBCMD +#define MMAP_ADDR CPU_CS_SCIBCMDARG0 +#define UC_REGION_ADDR CPU_CS_SCIBARG1 +#define UC_REGION_SIZE CPU_CS_SCIBARG2 + +/* HFI_DSP_QTBL_ADDR + * 31:3 - HFI_DSP_QTBL_ADDR + * 4-byte aligned Address + */ +#define HFI_DSP_QTBL_ADDR CPU_CS_VMIMSG + +/* HFI_DSP_UC_REGION_ADDR + * 31:20 - HFI_DSP_UC_REGION_ADDR + * 1MB aligned address. + * Uncached Region start Address. 
This region covers + * HFI DSP QTable, + * HFI DSP Queue Headers, + * HFI DSP Queues, + */ +#define HFI_DSP_UC_REGION_ADDR CPU_CS_VMIMSGAG0 + +/* HFI_DSP_UC_REGION_SIZE + * 31:20 - HFI_DSP_UC_REGION_SIZE + * Multiples of 1MB. + * Size of the DSP_UC_REGION Uncached Region + */ +#define HFI_DSP_UC_REGION_SIZE CPU_CS_VMIMSGAG1 + +/* + * -------------------------------------------------------------------------- + * MODULE: vcodec noc error log registers + * -------------------------------------------------------------------------- + */ +#define VCODEC_CORE0_VIDEO_NOC_BASE_OFFS 0x00004000 +#define VCODEC_CORE0_VIDEO_NOC_ERR_SWID_LOW_OFFS 0x0500 +#define VCODEC_CORE0_VIDEO_NOC_ERR_SWID_HIGH_OFFS 0x0504 +#define VCODEC_CORE0_VIDEO_NOC_ERR_MAINCTL_LOW_OFFS 0x0508 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS 0x0510 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRCLR_LOW_OFFS 0x0518 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS 0x0520 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG0_HIGH_OFFS 0x0524 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG1_LOW_OFFS 0x0528 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG1_HIGH_OFFS 0x052C +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG2_LOW_OFFS 0x0530 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG2_HIGH_OFFS 0x0534 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS 0x0538 +#define VCODEC_CORE0_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS 0x053C +#endif diff --git a/techpack/video/msm/vidc/hfi_iris2.c b/techpack/video/msm/vidc/hfi_iris2.c new file mode 100644 index 000000000000..5ac62b788ea4 --- /dev/null +++ b/techpack/video/msm/vidc/hfi_iris2.c @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include "msm_vidc_debug.h" +#include "hfi_common.h" + +#define VBIF_BASE_OFFS_IRIS2 0x00080000 + +#define CPU_BASE_OFFS_IRIS2 0x000A0000 +#define AON_BASE_OFFS 0x000E0000 +#define CPU_CS_BASE_OFFS_IRIS2 (CPU_BASE_OFFS_IRIS2) +#define CPU_IC_BASE_OFFS_IRIS2 (CPU_BASE_OFFS_IRIS2) + +#define CPU_CS_A2HSOFTINTCLR_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x1C) +#define CPU_CS_VCICMD_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x20) +#define CPU_CS_VCICMDARG0_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x24) +#define CPU_CS_VCICMDARG1_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x28) +#define CPU_CS_VCICMDARG2_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x2C) +#define CPU_CS_VCICMDARG3_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x30) +#define CPU_CS_VMIMSG_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x34) +#define CPU_CS_VMIMSGAG0_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x38) +#define CPU_CS_VMIMSGAG1_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x3C) +#define CPU_CS_SCIACMD_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x48) +#define CPU_CS_H2XSOFTINTEN_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x148) + +/* HFI_CTRL_STATUS */ +#define CPU_CS_SCIACMDARG0_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x4C) +#define CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK_IRIS2 0xfe +#define CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY_IRIS2 0x100 +#define CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK_IRIS2 0x40000000 + +/* HFI_QTBL_INFO */ +#define CPU_CS_SCIACMDARG1_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x50) + +/* HFI_QTBL_ADDR */ +#define CPU_CS_SCIACMDARG2_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x54) + +/* HFI_VERSION_INFO */ +#define CPU_CS_SCIACMDARG3_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x58) + +/* SFR_ADDR */ +#define CPU_CS_SCIBCMD_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x5C) + +/* MMAP_ADDR */ +#define CPU_CS_SCIBCMDARG0_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x60) + +/* UC_REGION_ADDR */ +#define CPU_CS_SCIBARG1_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x64) + +/* UC_REGION_ADDR */ +#define CPU_CS_SCIBARG2_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x68) + +/* FAL10 Feature Control */ +#define CPU_CS_X2RPMh_IRIS2 (CPU_CS_BASE_OFFS_IRIS2 + 0x168) +#define 
CPU_CS_X2RPMh_MASK0_BMSK_IRIS2 0x1 +#define CPU_CS_X2RPMh_MASK0_SHFT_IRIS2 0x0 +#define CPU_CS_X2RPMh_MASK1_BMSK_IRIS2 0x2 +#define CPU_CS_X2RPMh_MASK1_SHFT_IRIS2 0x1 +#define CPU_CS_X2RPMh_SWOVERRIDE_BMSK_IRIS2 0x4 +#define CPU_CS_X2RPMh_SWOVERRIDE_SHFT_IRIS2 0x3 + +#define CPU_IC_SOFTINT_IRIS2 (CPU_IC_BASE_OFFS_IRIS2 + 0x150) +#define CPU_IC_SOFTINT_H2A_SHFT_IRIS2 0x0 + +/* + * -------------------------------------------------------------------------- + * MODULE: wrapper + * -------------------------------------------------------------------------- + */ +#define WRAPPER_BASE_OFFS_IRIS2 0x000B0000 +#define WRAPPER_INTR_STATUS_IRIS2 (WRAPPER_BASE_OFFS_IRIS2 + 0x0C) +#define WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2 0x8 +#define WRAPPER_INTR_STATUS_A2H_BMSK_IRIS2 0x4 + +#define WRAPPER_INTR_MASK_IRIS2 (WRAPPER_BASE_OFFS_IRIS2 + 0x10) +#define WRAPPER_INTR_MASK_A2HWD_BMSK_IRIS2 0x8 +#define WRAPPER_INTR_MASK_A2HCPU_BMSK_IRIS2 0x4 + +#define WRAPPER_CPU_CLOCK_CONFIG_IRIS2 (WRAPPER_BASE_OFFS_IRIS2 + 0x2000) +#define WRAPPER_CPU_CGC_DIS_IRIS2 (WRAPPER_BASE_OFFS_IRIS2 + 0x2010) +#define WRAPPER_CPU_STATUS_IRIS2 (WRAPPER_BASE_OFFS_IRIS2 + 0x2014) + +#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_IRIS2 (WRAPPER_BASE_OFFS_IRIS2 + 0x54) +#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS_IRIS2 (WRAPPER_BASE_OFFS_IRIS2 + 0x58) +/* + * -------------------------------------------------------------------------- + * MODULE: tz_wrapper + * -------------------------------------------------------------------------- + */ +#define WRAPPER_TZ_BASE_OFFS 0x000C0000 +#define WRAPPER_TZ_CPU_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS) +#define WRAPPER_TZ_CPU_STATUS (WRAPPER_TZ_BASE_OFFS + 0x10) + +#define CTRL_INIT_IRIS2 CPU_CS_SCIACMD_IRIS2 + +#define CTRL_STATUS_IRIS2 CPU_CS_SCIACMDARG0_IRIS2 +#define CTRL_ERROR_STATUS__M_IRIS2 \ + CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK_IRIS2 +#define CTRL_INIT_IDLE_MSG_BMSK_IRIS2 \ + CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK_IRIS2 +#define CTRL_STATUS_PC_READY_IRIS2 \ + 
CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY_IRIS2 + + +#define QTBL_INFO_IRIS2 CPU_CS_SCIACMDARG1_IRIS2 + +#define QTBL_ADDR_IRIS2 CPU_CS_SCIACMDARG2_IRIS2 + +#define VERSION_INFO_IRIS2 CPU_CS_SCIACMDARG3_IRIS2 + +#define SFR_ADDR_IRIS2 CPU_CS_SCIBCMD_IRIS2 +#define MMAP_ADDR_IRIS2 CPU_CS_SCIBCMDARG0_IRIS2 +#define UC_REGION_ADDR_IRIS2 CPU_CS_SCIBARG1_IRIS2 +#define UC_REGION_SIZE_IRIS2 CPU_CS_SCIBARG2_IRIS2 + +#define AON_WRAPPER_MVP_NOC_LPI_CONTROL (AON_BASE_OFFS) +#define AON_WRAPPER_MVP_NOC_LPI_STATUS (AON_BASE_OFFS + 0x4) + +/* + * -------------------------------------------------------------------------- + * MODULE: vcodec noc error log registers (iris2) + * -------------------------------------------------------------------------- + */ +#define VCODEC_NOC_VIDEO_A_NOC_BASE_OFFS 0x00010000 +#define VCODEC_NOC_ERL_MAIN_SWID_LOW 0x00011200 +#define VCODEC_NOC_ERL_MAIN_SWID_HIGH 0x00011204 +#define VCODEC_NOC_ERL_MAIN_MAINCTL_LOW 0x00011208 +#define VCODEC_NOC_ERL_MAIN_ERRVLD_LOW 0x00011210 +#define VCODEC_NOC_ERL_MAIN_ERRCLR_LOW 0x00011218 +#define VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW 0x00011220 +#define VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH 0x00011224 +#define VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW 0x00011228 +#define VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH 0x0001122C +#define VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW 0x00011230 +#define VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH 0x00011234 +#define VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW 0x00011238 +#define VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH 0x0001123C + +void __interrupt_init_iris2(struct venus_hfi_device *device, u32 sid) +{ + u32 mask_val = 0; + + /* All interrupts should be disabled initially 0x1F6 : Reset value */ + mask_val = __read_register(device, WRAPPER_INTR_MASK_IRIS2, sid); + + /* Write 0 to unmask CPU and WD interrupts */ + mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK_IRIS2| + WRAPPER_INTR_MASK_A2HCPU_BMSK_IRIS2); + __write_register(device, WRAPPER_INTR_MASK_IRIS2, mask_val, sid); +} + +void __setup_ucregion_memory_map_iris2(struct venus_hfi_device *device, 
u32 sid) +{ + __write_register(device, UC_REGION_ADDR_IRIS2, + (u32)device->iface_q_table.align_device_addr, sid); + __write_register(device, UC_REGION_SIZE_IRIS2, SHARED_QSIZE, sid); + __write_register(device, QTBL_ADDR_IRIS2, + (u32)device->iface_q_table.align_device_addr, sid); + __write_register(device, QTBL_INFO_IRIS2, 0x01, sid); + if (device->sfr.align_device_addr) + __write_register(device, SFR_ADDR_IRIS2, + (u32)device->sfr.align_device_addr, sid); + if (device->qdss.align_device_addr) + __write_register(device, MMAP_ADDR_IRIS2, + (u32)device->qdss.align_device_addr, sid); + /* update queues vaddr for debug purpose */ + __write_register(device, CPU_CS_VCICMDARG0_IRIS2, + (u32)device->iface_q_table.align_virtual_addr, sid); + __write_register(device, CPU_CS_VCICMDARG1_IRIS2, + (u32)((u64)device->iface_q_table.align_virtual_addr >> 32), + sid); +} + +void __power_off_iris2(struct venus_hfi_device *device) +{ + u32 lpi_status, reg_status = 0, count = 0, max_count = 10; + u32 sid = DEFAULT_SID; + + if (!device->power_enabled) + return; + + if (!(device->intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2)) + disable_irq_nosync(device->hal_data->irq); + device->intr_status = 0; + + /* HPG 6.1.2 Step 1 */ + __write_register(device, CPU_CS_X2RPMh_IRIS2, 0x3, sid); + + /* HPG 6.1.2 Step 2, noc to low power */ + if (device->res->vpu_ver == VPU_VERSION_IRIS2_1) + goto skip_aon_mvp_noc; + __write_register(device, AON_WRAPPER_MVP_NOC_LPI_CONTROL, 0x1, sid); + while (!reg_status && count < max_count) { + lpi_status = + __read_register(device, + AON_WRAPPER_MVP_NOC_LPI_STATUS, sid); + reg_status = lpi_status & BIT(0); + d_vpr_h("Noc: lpi_status %d noc_status %d (count %d)\n", + lpi_status, reg_status, count); + usleep_range(50, 100); + count++; + } + if (count == max_count) { + d_vpr_e("NOC not in qaccept status %d\n", reg_status); + } + + /* HPG 6.1.2 Step 3, debug bridge to low power */ +skip_aon_mvp_noc: + __write_register(device, + 
WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_IRIS2, 0x7, sid); + reg_status = 0; + count = 0; + while ((reg_status != 0x7) && count < max_count) { + lpi_status = __read_register(device, + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_IRIS2, sid); + reg_status = lpi_status & 0x7; + d_vpr_h("DBLP Set : lpi_status %d reg_status %d (count %d)\n", + lpi_status, reg_status, count); + usleep_range(50, 100); + count++; + } + if (count == max_count) + d_vpr_e("DBLP Set: status %d\n", reg_status); + + /* HPG 6.1.2 Step 4, debug bridge to lpi release */ + __write_register(device, + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_IRIS2, 0x0, sid); + lpi_status = 0x1; + count = 0; + while (lpi_status && count < max_count) { + lpi_status = __read_register(device, + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_IRIS2, sid); + d_vpr_h("DBLP Release: lpi_status %d(count %d)\n", + lpi_status, count); + usleep_range(50, 100); + count++; + } + if (count == max_count) + d_vpr_e("DBLP Release: lpi_status %d\n", lpi_status); + + /* HPG 6.1.2 Step 6 */ + __disable_unprepare_clks(device); + + /* HPG 6.1.2 Step 5 */ + if (__disable_regulators(device)) + d_vpr_e("%s: Failed to disable regulators\n", __func__); + + if (__unvote_buses(device, sid)) + d_vpr_e("%s: Failed to unvote for buses\n", __func__); + device->power_enabled = false; +} + +int __prepare_pc_iris2(struct venus_hfi_device *device) +{ + int rc = 0; + u32 wfi_status = 0, idle_status = 0, pc_ready = 0; + u32 ctrl_status = 0; + int count = 0; + const int max_tries = 10; + + ctrl_status = __read_register(device, CTRL_STATUS_IRIS2, DEFAULT_SID); + pc_ready = ctrl_status & CTRL_STATUS_PC_READY_IRIS2; + idle_status = ctrl_status & BIT(30); + + if (pc_ready) { + d_vpr_h("Already in pc_ready state\n"); + return 0; + } + + wfi_status = BIT(0) & __read_register(device, WRAPPER_TZ_CPU_STATUS, + DEFAULT_SID); + if (!wfi_status || !idle_status) { + d_vpr_e("Skipping PC, wfi status not set\n"); + goto skip_power_off; + } + + rc = __prepare_pc(device); + if (rc) { + d_vpr_e("Failed __prepare_pc 
%d\n", rc); + goto skip_power_off; + } + + while (count < max_tries) { + wfi_status = BIT(0) & __read_register(device, + WRAPPER_TZ_CPU_STATUS, DEFAULT_SID); + ctrl_status = __read_register(device, + CTRL_STATUS_IRIS2, DEFAULT_SID); + if (wfi_status && (ctrl_status & CTRL_STATUS_PC_READY_IRIS2)) + break; + usleep_range(150, 250); + count++; + } + + if (count == max_tries) { + d_vpr_e("Skip PC. Core is not in right state\n"); + goto skip_power_off; + } + + return rc; + +skip_power_off: + d_vpr_e("Skip PC, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x)\n", + wfi_status, idle_status, pc_ready, ctrl_status); + return -EAGAIN; +} + +void __raise_interrupt_iris2(struct venus_hfi_device *device, u32 sid) +{ + __write_register(device, CPU_IC_SOFTINT_IRIS2, + 1 << CPU_IC_SOFTINT_H2A_SHFT_IRIS2, sid); +} + +bool __watchdog_iris2(u32 intr_status) +{ + bool rc = false; + + if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2) + rc = true; + + return rc; +} + +void __noc_error_info_iris2(struct venus_hfi_device *device) +{ + u32 val = 0; + u32 sid = DEFAULT_SID; + + if (device->res->vpu_ver == VPU_VERSION_IRIS2_1) + return; + val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_SWID_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_HIGH, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_SWID_HIGH: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_MAINCTL_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_MAINCTL_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRVLD_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRVLD_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRCLR_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRCLR_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH: 
%#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW: %#x\n", val); + val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH, sid); + d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH: %#x\n", val); +} + +void __core_clear_interrupt_iris2(struct venus_hfi_device *device) +{ + u32 intr_status = 0, mask = 0; + + if (!device) { + d_vpr_e("%s: NULL device\n", __func__); + return; + } + + intr_status = __read_register(device, WRAPPER_INTR_STATUS_IRIS2, + DEFAULT_SID); + mask = (WRAPPER_INTR_STATUS_A2H_BMSK_IRIS2| + WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2| + CTRL_INIT_IDLE_MSG_BMSK_IRIS2); + + if (intr_status & mask) { + device->intr_status |= intr_status; + device->reg_count++; + d_vpr_l("INTERRUPT: times: %d interrupt_status: %d\n", + device->reg_count, intr_status); + } else { + device->spur_count++; + } + + __write_register(device, CPU_CS_A2HSOFTINTCLR_IRIS2, 1, DEFAULT_SID); +} + +int __boot_firmware_iris2(struct venus_hfi_device *device, u32 sid) +{ + int rc = 0; + u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 1000; + + ctrl_init_val = BIT(0); + + __write_register(device, CTRL_INIT_IRIS2, ctrl_init_val, sid); + while (!ctrl_status && count < max_tries) { + ctrl_status = __read_register(device, CTRL_STATUS_IRIS2, sid); + if ((ctrl_status & CTRL_ERROR_STATUS__M_IRIS2) == 0x4) { + s_vpr_e(sid, "invalid setting for UC_REGION\n"); + break; + } + + usleep_range(50, 100); + 
count++; + } + + if (count >= max_tries) { + s_vpr_e(sid, "Error booting up vidc firmware\n"); + rc = -ETIME; + } + + /* Enable interrupt before sending commands to venus */ + __write_register(device, CPU_CS_H2XSOFTINTEN_IRIS2, 0x1, sid); + __write_register(device, CPU_CS_X2RPMh_IRIS2, 0x0, sid); + + return rc; +} diff --git a/techpack/video/msm/vidc/hfi_packetization.c b/techpack/video/msm/vidc/hfi_packetization.c new file mode 100644 index 000000000000..00f92a68c406 --- /dev/null +++ b/techpack/video/msm/vidc/hfi_packetization.c @@ -0,0 +1,743 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ +#include "hfi_packetization.h" +#include "msm_vidc_debug.h" + +u32 vidc_get_hfi_domain(enum hal_domain hal_domain, u32 sid) +{ + u32 hfi_domain; + + switch (hal_domain) { + case HAL_VIDEO_DOMAIN_VPE: + hfi_domain = HFI_VIDEO_DOMAIN_VPE; + break; + case HAL_VIDEO_DOMAIN_ENCODER: + hfi_domain = HFI_VIDEO_DOMAIN_ENCODER; + break; + case HAL_VIDEO_DOMAIN_DECODER: + hfi_domain = HFI_VIDEO_DOMAIN_DECODER; + break; + default: + s_vpr_e(sid, "%s: invalid domain 0x%x\n", + __func__, hal_domain); + hfi_domain = 0; + break; + } + return hfi_domain; +} + +u32 vidc_get_hfi_codec(enum hal_video_codec hal_codec, u32 sid) +{ + u32 hfi_codec = 0; + + switch (hal_codec) { + case HAL_VIDEO_CODEC_H264: + hfi_codec = HFI_VIDEO_CODEC_H264; + break; + case HAL_VIDEO_CODEC_MPEG1: + hfi_codec = HFI_VIDEO_CODEC_MPEG1; + break; + case HAL_VIDEO_CODEC_MPEG2: + hfi_codec = HFI_VIDEO_CODEC_MPEG2; + break; + case HAL_VIDEO_CODEC_HEVC: + hfi_codec = HFI_VIDEO_CODEC_HEVC; + break; + case HAL_VIDEO_CODEC_VP9: + hfi_codec = HFI_VIDEO_CODEC_VP9; + break; + default: + s_vpr_h(sid, "%s: invalid codec 0x%x\n", + __func__, hal_codec); + hfi_codec = 0; + break; + } + return hfi_codec; +} + +int create_pkt_cmd_sys_init(struct hfi_cmd_sys_init_packet *pkt, + u32 arch_type) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->packet_type 
= HFI_CMD_SYS_INIT; + pkt->size = sizeof(struct hfi_cmd_sys_init_packet); + pkt->arch_type = arch_type; + return rc; +} + +int create_pkt_cmd_sys_ping(struct hfi_cmd_sys_ping_packet *pkt, u32 sid) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_sys_ping_packet); + pkt->packet_type = HFI_CMD_SYS_PING; + pkt->sid = sid; + + return rc; +} + +int create_pkt_cmd_sys_pc_prep(struct hfi_cmd_sys_pc_prep_packet *pkt) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->packet_type = HFI_CMD_SYS_PC_PREP; + pkt->size = sizeof(struct hfi_cmd_sys_pc_prep_packet); + return rc; +} + +int create_pkt_cmd_sys_debug_config( + struct hfi_cmd_sys_set_property_packet *pkt, + u32 mode) +{ + struct hfi_debug_config *hfi; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) + + sizeof(struct hfi_debug_config) + sizeof(u32); + pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG; + hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1]; + hfi->debug_config = mode; + hfi->debug_mode = HFI_DEBUG_MODE_QUEUE; + if (msm_vidc_fw_debug_mode + <= (HFI_DEBUG_MODE_QUEUE | HFI_DEBUG_MODE_QDSS)) + hfi->debug_mode = msm_vidc_fw_debug_mode; + return 0; +} + +int create_pkt_cmd_sys_coverage_config( + struct hfi_cmd_sys_set_property_packet *pkt, + u32 mode, u32 sid) +{ + if (!pkt) { + s_vpr_e(sid, "In %s(), No input packet\n", __func__); + return -EINVAL; + } + + pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) + + sizeof(u32); + pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE; + pkt->rg_property_data[1] = mode; + s_vpr_h(sid, "Firmware coverage mode %d\n", pkt->rg_property_data[1]); + return 0; +} + +int create_pkt_cmd_sys_set_resource( + struct hfi_cmd_sys_set_resource_packet *pkt, + struct vidc_resource_hdr *res_hdr, + void *res_value) +{ + 
int rc = 0; + u32 i = 0; + + if (!pkt || !res_hdr || !res_value) { + d_vpr_e("Invalid paramas pkt %pK res_hdr %pK res_value %pK\n", + pkt, res_hdr, res_value); + return -EINVAL; + } + + pkt->packet_type = HFI_CMD_SYS_SET_RESOURCE; + pkt->size = sizeof(struct hfi_cmd_sys_set_resource_packet); + pkt->resource_handle = hash32_ptr(res_hdr->resource_handle); + + switch (res_hdr->resource_id) { + case VIDC_RESOURCE_SYSCACHE: + { + struct hfi_resource_syscache_info_type *res_sc_info = + (struct hfi_resource_syscache_info_type *) res_value; + struct hfi_resource_subcache_type *res_sc = + (struct hfi_resource_subcache_type *) + &(res_sc_info->rg_subcache_entries[0]); + + struct hfi_resource_syscache_info_type *hfi_sc_info = + (struct hfi_resource_syscache_info_type *) + &pkt->rg_resource_data[0]; + + struct hfi_resource_subcache_type *hfi_sc = + (struct hfi_resource_subcache_type *) + &(hfi_sc_info->rg_subcache_entries[0]); + + pkt->resource_type = HFI_RESOURCE_SYSCACHE; + hfi_sc_info->num_entries = res_sc_info->num_entries; + + pkt->size += (sizeof(struct hfi_resource_subcache_type)) + * hfi_sc_info->num_entries; + + for (i = 0; i < hfi_sc_info->num_entries; i++) { + hfi_sc[i] = res_sc[i]; + d_vpr_h("entry hfi#%d, sc_id %d, size %d\n", + i, hfi_sc[i].sc_id, hfi_sc[i].size); + } + break; + } + default: + d_vpr_e("Invalid resource_id %d\n", res_hdr->resource_id); + rc = -ENOTSUPP; + } + + return rc; +} + +int create_pkt_cmd_sys_release_resource( + struct hfi_cmd_sys_release_resource_packet *pkt, + struct vidc_resource_hdr *res_hdr) +{ + int rc = 0; + + if (!pkt || !res_hdr) { + d_vpr_e("Invalid paramas pkt %pK res_hdr %pK\n", + pkt, res_hdr); + return -EINVAL; + } + + pkt->size = sizeof(struct hfi_cmd_sys_release_resource_packet); + pkt->packet_type = HFI_CMD_SYS_RELEASE_RESOURCE; + pkt->resource_handle = hash32_ptr(res_hdr->resource_handle); + + switch (res_hdr->resource_id) { + case VIDC_RESOURCE_SYSCACHE: + pkt->resource_type = HFI_RESOURCE_SYSCACHE; + break; + default: + 
d_vpr_e("Invalid resource_id %d\n", res_hdr->resource_id); + rc = -ENOTSUPP; + } + + d_vpr_h("rel_res: pkt_type 0x%x res_type 0x%x prepared\n", + pkt->packet_type, pkt->resource_type); + + return rc; +} + +inline int create_pkt_cmd_sys_session_init( + struct hfi_cmd_sys_session_init_packet *pkt, + u32 sid, u32 session_domain, u32 session_codec) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_sys_session_init_packet); + pkt->packet_type = HFI_CMD_SYS_SESSION_INIT; + pkt->sid = sid; + pkt->session_domain = vidc_get_hfi_domain(session_domain, sid); + pkt->session_codec = vidc_get_hfi_codec(session_codec, sid); + if (!pkt->session_codec) + return -EINVAL; + + return rc; +} + + +int create_pkt_cmd_sys_ubwc_config( + struct hfi_cmd_sys_set_property_packet *pkt, + struct msm_vidc_ubwc_config_data *ubwc_config) +{ + int rc = 0; + struct hfi_cmd_sys_set_ubwc_config_packet_type *hfi; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) + + sizeof(struct hfi_cmd_sys_set_ubwc_config_packet_type) + + sizeof(u32); + + pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->rg_property_data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG; + hfi = (struct hfi_cmd_sys_set_ubwc_config_packet_type *) + &pkt->rg_property_data[1]; + + hfi->max_channels = ubwc_config->max_channels; + hfi->override_bit_info.max_channel_override = + ubwc_config->override_bit_info.max_channel_override; + + hfi->mal_length = ubwc_config->mal_length; + hfi->override_bit_info.mal_length_override = + ubwc_config->override_bit_info.mal_length_override; + + hfi->highest_bank_bit = ubwc_config->highest_bank_bit; + hfi->override_bit_info.hb_override = + ubwc_config->override_bit_info.hb_override; + + hfi->bank_swzl_level = ubwc_config->bank_swzl_level; + hfi->override_bit_info.bank_swzl_level_override = + ubwc_config->override_bit_info.bank_swzl_level_override; + + hfi->bank_spreading = ubwc_config->bank_spreading; + 
hfi->override_bit_info.bank_spreading_override = + ubwc_config->override_bit_info.bank_spreading_override; + + return rc; +} + +int create_pkt_cmd_session_cmd(struct vidc_hal_session_cmd_pkt *pkt, + int pkt_type, u32 sid) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct vidc_hal_session_cmd_pkt); + pkt->packet_type = pkt_type; + pkt->sid = sid; + + return rc; +} + +int create_pkt_cmd_sys_power_control( + struct hfi_cmd_sys_set_property_packet *pkt, u32 enable) +{ + struct hfi_enable *hfi; + + if (!pkt) { + d_vpr_e("%s: No input packet\n", __func__); + return -EINVAL; + } + + pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) + + sizeof(struct hfi_enable) + sizeof(u32); + pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL; + hfi = (struct hfi_enable *) &pkt->rg_property_data[1]; + hfi->enable = enable; + return 0; +} + +static u32 get_hfi_buffer(int hal_buffer, u32 sid) +{ + u32 buffer; + + switch (hal_buffer) { + case HAL_BUFFER_INPUT: + buffer = HFI_BUFFER_INPUT; + break; + case HAL_BUFFER_OUTPUT: + buffer = HFI_BUFFER_OUTPUT; + break; + case HAL_BUFFER_OUTPUT2: + buffer = HFI_BUFFER_OUTPUT2; + break; + case HAL_BUFFER_EXTRADATA_INPUT: + buffer = HFI_BUFFER_EXTRADATA_INPUT; + break; + case HAL_BUFFER_EXTRADATA_OUTPUT: + buffer = HFI_BUFFER_EXTRADATA_OUTPUT; + break; + case HAL_BUFFER_EXTRADATA_OUTPUT2: + buffer = HFI_BUFFER_EXTRADATA_OUTPUT2; + break; + case HAL_BUFFER_INTERNAL_SCRATCH: + buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH; + break; + case HAL_BUFFER_INTERNAL_SCRATCH_1: + buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1; + break; + case HAL_BUFFER_INTERNAL_SCRATCH_2: + buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2; + break; + case HAL_BUFFER_INTERNAL_PERSIST: + buffer = HFI_BUFFER_INTERNAL_PERSIST; + break; + case HAL_BUFFER_INTERNAL_PERSIST_1: + buffer = HFI_BUFFER_INTERNAL_PERSIST_1; + break; + default: + s_vpr_e(sid, "Invalid 
buffer: %#x\n", hal_buffer); + buffer = 0; + break; + } + return buffer; +} + +int create_pkt_cmd_session_set_buffers( + struct hfi_cmd_session_set_buffers_packet *pkt, + u32 sid, struct vidc_buffer_addr_info *buffer_info) +{ + int rc = 0; + u32 i = 0; + + if (!pkt) + return -EINVAL; + + pkt->packet_type = HFI_CMD_SESSION_SET_BUFFERS; + pkt->sid = sid; + pkt->buffer_size = buffer_info->buffer_size; + pkt->min_buffer_size = buffer_info->buffer_size; + pkt->num_buffers = buffer_info->num_buffers; + + if (buffer_info->buffer_type == HAL_BUFFER_OUTPUT || + buffer_info->buffer_type == HAL_BUFFER_OUTPUT2) { + struct hfi_buffer_info *buff; + + pkt->extra_data_size = buffer_info->extradata_size; + + pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) - + sizeof(u32) + (buffer_info->num_buffers * + sizeof(struct hfi_buffer_info)); + buff = (struct hfi_buffer_info *) pkt->rg_buffer_info; + for (i = 0; i < pkt->num_buffers; i++) { + buff->buffer_addr = + (u32)buffer_info->align_device_addr; + buff->extra_data_addr = + (u32)buffer_info->extradata_addr; + } + } else { + pkt->extra_data_size = 0; + pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) + + ((buffer_info->num_buffers - 1) * sizeof(u32)); + for (i = 0; i < pkt->num_buffers; i++) { + pkt->rg_buffer_info[i] = + (u32)buffer_info->align_device_addr; + } + } + + pkt->buffer_type = + get_hfi_buffer(buffer_info->buffer_type, pkt->sid); + if (!pkt->buffer_type) + return -EINVAL; + + return rc; +} + +int create_pkt_cmd_session_release_buffers( + struct hfi_cmd_session_release_buffer_packet *pkt, + u32 sid, struct vidc_buffer_addr_info *buffer_info) +{ + int rc = 0; + u32 i = 0; + + if (!pkt) + return -EINVAL; + + pkt->packet_type = HFI_CMD_SESSION_RELEASE_BUFFERS; + pkt->sid = sid; + pkt->buffer_size = buffer_info->buffer_size; + pkt->num_buffers = buffer_info->num_buffers; + + if (buffer_info->buffer_type == HAL_BUFFER_OUTPUT || + buffer_info->buffer_type == HAL_BUFFER_OUTPUT2) { + struct hfi_buffer_info 
*buff; + + buff = (struct hfi_buffer_info *) pkt->rg_buffer_info; + for (i = 0; i < pkt->num_buffers; i++) { + buff->buffer_addr = + (u32)buffer_info->align_device_addr; + buff->extra_data_addr = + (u32)buffer_info->extradata_addr; + } + pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) - + sizeof(u32) + (buffer_info->num_buffers * + sizeof(struct hfi_buffer_info)); + } else { + for (i = 0; i < pkt->num_buffers; i++) { + pkt->rg_buffer_info[i] = + (u32)buffer_info->align_device_addr; + } + pkt->extra_data_size = 0; + pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) + + ((buffer_info->num_buffers - 1) * sizeof(u32)); + } + pkt->response_req = buffer_info->response_required; + pkt->buffer_type = + get_hfi_buffer(buffer_info->buffer_type, pkt->sid); + if (!pkt->buffer_type) + return -EINVAL; + return rc; +} + +int create_pkt_cmd_session_etb_decoder( + struct hfi_cmd_session_empty_buffer_compressed_packet *pkt, + u32 sid, struct vidc_frame_data *input_frame) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->size = + sizeof(struct hfi_cmd_session_empty_buffer_compressed_packet); + pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER; + pkt->sid = sid; + pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp); + pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp); + pkt->flags = input_frame->flags; + pkt->mark_target = 0xff; + pkt->mark_data = 0xff; + pkt->offset = input_frame->offset; + pkt->alloc_len = input_frame->alloc_len; + pkt->filled_len = input_frame->filled_len; + pkt->input_tag = input_frame->input_tag; + pkt->packet_buffer = (u32)input_frame->device_addr; + pkt->extra_data_buffer = 0; + pkt->rgData[0] = 0; + + trace_msm_v4l2_vidc_buffer_event_start("ETB", + input_frame->device_addr, input_frame->timestamp, + input_frame->alloc_len, input_frame->filled_len, + input_frame->offset); + + if (!pkt->packet_buffer) + rc = -EINVAL; + return rc; +} + +int create_pkt_cmd_session_etb_encoder( + struct 
hfi_cmd_session_empty_buffer_uncompressed_plane0_packet *pkt, + u32 sid, struct vidc_frame_data *input_frame) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct + hfi_cmd_session_empty_buffer_uncompressed_plane0_packet); + pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER; + pkt->sid = sid; + pkt->view_id = 0; + pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp); + pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp); + pkt->flags = input_frame->flags; + pkt->mark_target = 0xff; + pkt->mark_data = 0xff; + pkt->offset = input_frame->offset; + pkt->alloc_len = input_frame->alloc_len; + pkt->filled_len = input_frame->filled_len; + pkt->input_tag = input_frame->input_tag; + pkt->packet_buffer = (u32)input_frame->device_addr; + pkt->extra_data_buffer = (u32)input_frame->extradata_addr; + pkt->rgData[0] = input_frame->extradata_size; + + trace_msm_v4l2_vidc_buffer_event_start("ETB", + input_frame->device_addr, input_frame->timestamp, + input_frame->alloc_len, input_frame->filled_len, + input_frame->offset); + + if (!pkt->packet_buffer) + rc = -EINVAL; + return rc; +} + +int create_pkt_cmd_session_ftb(struct hfi_cmd_session_fill_buffer_packet *pkt, + u32 sid, struct vidc_frame_data *output_frame) +{ + int rc = 0; + + if (!pkt || !output_frame) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_session_fill_buffer_packet); + pkt->packet_type = HFI_CMD_SESSION_FILL_BUFFER; + pkt->sid = sid; + + if (output_frame->buffer_type == HAL_BUFFER_OUTPUT) + pkt->stream_id = 0; + else if (output_frame->buffer_type == HAL_BUFFER_OUTPUT2) + pkt->stream_id = 1; + + if (!output_frame->device_addr) + return -EINVAL; + + pkt->packet_buffer = (u32)output_frame->device_addr; + pkt->extra_data_buffer = (u32)output_frame->extradata_addr; + pkt->alloc_len = output_frame->alloc_len; + pkt->filled_len = output_frame->filled_len; + pkt->offset = output_frame->offset; + pkt->rgData[0] = output_frame->extradata_size; + + 
trace_msm_v4l2_vidc_buffer_event_start("FTB", + output_frame->device_addr, output_frame->timestamp, + output_frame->alloc_len, output_frame->filled_len, + output_frame->offset); + + return rc; +} + +int create_pkt_cmd_session_get_buf_req( + struct hfi_cmd_session_get_property_packet *pkt, + u32 sid) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_session_get_property_packet); + pkt->packet_type = HFI_CMD_SESSION_GET_PROPERTY; + pkt->sid = sid; + pkt->num_properties = 1; + pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS; + + return rc; +} + +int create_pkt_cmd_session_flush(struct hfi_cmd_session_flush_packet *pkt, + u32 sid, enum hal_flush flush_mode) +{ + int rc = 0; + + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_session_flush_packet); + pkt->packet_type = HFI_CMD_SESSION_FLUSH; + pkt->sid = sid; + switch (flush_mode) { + case HAL_FLUSH_INPUT: + pkt->flush_type = HFI_FLUSH_INPUT; + break; + case HAL_FLUSH_OUTPUT: + pkt->flush_type = HFI_FLUSH_OUTPUT; + break; + case HAL_FLUSH_ALL: + pkt->flush_type = HFI_FLUSH_ALL; + break; + default: + s_vpr_e(pkt->sid, "Invalid flush mode: %#x\n", flush_mode); + return -EINVAL; + } + return rc; +} + +int create_pkt_cmd_session_set_property( + struct hfi_cmd_session_set_property_packet *pkt, + u32 sid, + u32 ptype, void *pdata, u32 size) +{ + if (!pkt) + return -EINVAL; + + pkt->size = sizeof(struct hfi_cmd_session_set_property_packet); + pkt->packet_type = HFI_CMD_SESSION_SET_PROPERTY; + pkt->sid = sid; + pkt->num_properties = 1; + pkt->size += size; + pkt->rg_property_data[0] = ptype; + if (size && pdata) + memcpy(&pkt->rg_property_data[1], pdata, size); + + s_vpr_h(pkt->sid, "Setting HAL Property = 0x%x\n", ptype); + return 0; +} + +static int get_hfi_ssr_type(enum hal_ssr_trigger_type type) +{ + int rc = HFI_TEST_SSR_SW_ERR_FATAL; + + switch (type) { + case SSR_ERR_FATAL: + rc = HFI_TEST_SSR_SW_ERR_FATAL; + break; + case SSR_SW_DIV_BY_ZERO: + rc 
= HFI_TEST_SSR_SW_DIV_BY_ZERO; + break; + case SSR_HW_WDOG_IRQ: + rc = HFI_TEST_SSR_HW_WDOG_IRQ; + break; + case SSR_NOC_ERROR: + rc = HFI_TEST_SSR_NOC_ERROR; + break; + case SSR_VCODEC_HUNG: + rc = HFI_TEST_SSR_VCODEC_HUNG; + break; + default: + d_vpr_e("SSR trigger type not recognized, using WDOG.\n"); + } + return rc; +} + +int create_pkt_ssr_cmd(struct hfi_cmd_sys_test_ssr_packet *pkt, + enum hal_ssr_trigger_type ssr_type, u32 sub_client_id, + u32 test_addr) +{ + struct hfi_ssr_payload payload; + if (!pkt) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + pkt->size = sizeof(struct hfi_cmd_sys_test_ssr_packet) - sizeof(u32); + pkt->packet_type = HFI_CMD_SYS_TEST_SSR; + pkt->trigger_type = get_hfi_ssr_type(ssr_type); + if (pkt->trigger_type == HFI_TEST_SSR_NOC_ERROR || + pkt->trigger_type == HFI_TEST_SSR_VCODEC_HUNG) { + pkt->size += sizeof(struct hfi_ssr_payload); + payload.sub_client_id = sub_client_id; + payload.test_addr = test_addr; + memcpy(&pkt->rg_data[0], &payload, sizeof(struct hfi_ssr_payload)); + } + return 0; +} + +int create_pkt_cmd_sys_image_version( + struct hfi_cmd_sys_get_property_packet *pkt) +{ + if (!pkt) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + pkt->size = sizeof(struct hfi_cmd_sys_get_property_packet); + pkt->packet_type = HFI_CMD_SYS_GET_PROPERTY; + pkt->num_properties = 1; + pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION; + return 0; +} + +static struct hfi_packetization_ops hfi_default = { + .sys_init = create_pkt_cmd_sys_init, + .sys_ping = create_pkt_cmd_sys_ping, + .sys_pc_prep = create_pkt_cmd_sys_pc_prep, + .sys_power_control = create_pkt_cmd_sys_power_control, + .sys_set_resource = create_pkt_cmd_sys_set_resource, + .sys_debug_config = create_pkt_cmd_sys_debug_config, + .sys_coverage_config = create_pkt_cmd_sys_coverage_config, + .sys_release_resource = create_pkt_cmd_sys_release_resource, + .sys_image_version = create_pkt_cmd_sys_image_version, + .sys_ubwc_config = 
create_pkt_cmd_sys_ubwc_config, + .ssr_cmd = create_pkt_ssr_cmd, + .session_init = create_pkt_cmd_sys_session_init, + .session_cmd = create_pkt_cmd_session_cmd, + .session_set_buffers = create_pkt_cmd_session_set_buffers, + .session_release_buffers = create_pkt_cmd_session_release_buffers, + .session_etb_decoder = create_pkt_cmd_session_etb_decoder, + .session_etb_encoder = create_pkt_cmd_session_etb_encoder, + .session_ftb = create_pkt_cmd_session_ftb, + .session_get_buf_req = create_pkt_cmd_session_get_buf_req, + .session_flush = create_pkt_cmd_session_flush, + .session_set_property = create_pkt_cmd_session_set_property, +}; + +struct hfi_packetization_ops *hfi_get_pkt_ops_handle( + enum hfi_packetization_type type) +{ + d_vpr_h("%s selected\n", type == HFI_PACKETIZATION_4XX ? + "4xx packetization" : "Unknown hfi"); + + switch (type) { + case HFI_PACKETIZATION_4XX: + return &hfi_default; + } + + return NULL; +} diff --git a/techpack/video/msm/vidc/hfi_packetization.h b/techpack/video/msm/vidc/hfi_packetization.h new file mode 100644 index 000000000000..412f83199660 --- /dev/null +++ b/techpack/video/msm/vidc/hfi_packetization.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ +#ifndef __HFI_PACKETIZATION_H__ +#define __HFI_PACKETIZATION_H__ + +#include "vidc_hfi_helper.h" +#include "vidc_hfi.h" +#include "vidc_hfi_api.h" + +#define call_hfi_pkt_op(q, op, ...) \ + (((q) && (q)->pkt_ops && (q)->pkt_ops->op) ? 
\ + ((q)->pkt_ops->op(__VA_ARGS__)) : 0) + +enum hfi_packetization_type { + HFI_PACKETIZATION_4XX, +}; + +struct hfi_packetization_ops { + int (*sys_init)(struct hfi_cmd_sys_init_packet *pkt, u32 arch_type); + int (*sys_ping)(struct hfi_cmd_sys_ping_packet *pkt, u32 sid); + int (*sys_pc_prep)(struct hfi_cmd_sys_pc_prep_packet *pkt); + int (*sys_power_control)(struct hfi_cmd_sys_set_property_packet *pkt, + u32 enable); + int (*sys_set_resource)( + struct hfi_cmd_sys_set_resource_packet *pkt, + struct vidc_resource_hdr *resource_hdr, + void *resource_value); + int (*sys_debug_config)(struct hfi_cmd_sys_set_property_packet *pkt, + u32 mode); + int (*sys_coverage_config)(struct hfi_cmd_sys_set_property_packet *pkt, + u32 mode, u32 sid); + int (*sys_release_resource)( + struct hfi_cmd_sys_release_resource_packet *pkt, + struct vidc_resource_hdr *resource_hdr); + int (*sys_image_version)(struct hfi_cmd_sys_get_property_packet *pkt); + int (*sys_ubwc_config)(struct hfi_cmd_sys_set_property_packet *pkt, + struct msm_vidc_ubwc_config_data *ubwc_config); + int (*ssr_cmd)(struct hfi_cmd_sys_test_ssr_packet *pkt, + enum hal_ssr_trigger_type ssr_type, u32 sub_client_id, + u32 test_addr); + int (*session_init)( + struct hfi_cmd_sys_session_init_packet *pkt, + u32 sid, u32 session_domain, u32 session_codec); + int (*session_cmd)(struct vidc_hal_session_cmd_pkt *pkt, + int pkt_type, u32 sid); + int (*session_set_buffers)( + struct hfi_cmd_session_set_buffers_packet *pkt, + u32 sid, struct vidc_buffer_addr_info *buffer_info); + int (*session_release_buffers)( + struct hfi_cmd_session_release_buffer_packet *pkt, + u32 sid, struct vidc_buffer_addr_info *buffer_info); + int (*session_etb_decoder)( + struct hfi_cmd_session_empty_buffer_compressed_packet *pkt, + u32 sid, struct vidc_frame_data *input_frame); + int (*session_etb_encoder)( + struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet + *pkt, u32 sid, struct vidc_frame_data *input_frame); + int (*session_ftb)(struct 
hfi_cmd_session_fill_buffer_packet *pkt, + u32 sid, struct vidc_frame_data *output_frame); + int (*session_get_buf_req)( + struct hfi_cmd_session_get_property_packet *pkt, u32 sid); + int (*session_flush)(struct hfi_cmd_session_flush_packet *pkt, + u32 sid, enum hal_flush flush_mode); + int (*session_set_property)( + struct hfi_cmd_session_set_property_packet *pkt, + u32 sid, u32 ptype, void *pdata, u32 size); +}; + +struct hfi_packetization_ops *hfi_get_pkt_ops_handle( + enum hfi_packetization_type); +#endif diff --git a/techpack/video/msm/vidc/hfi_response_handler.c b/techpack/video/msm/vidc/hfi_response_handler.c new file mode 100644 index 000000000000..ca5c21f3a9d6 --- /dev/null +++ b/techpack/video/msm/vidc/hfi_response_handler.c @@ -0,0 +1,1249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include "vidc_hfi_helper.h" +#include "msm_vidc_debug.h" +#include "vidc_hfi.h" + +static enum vidc_status hfi_map_err_status(u32 hfi_err) +{ + enum vidc_status vidc_err; + + switch (hfi_err) { + case HFI_ERR_NONE: + case HFI_ERR_SESSION_SAME_STATE_OPERATION: + vidc_err = VIDC_ERR_NONE; + break; + case HFI_ERR_SYS_FATAL: + vidc_err = VIDC_ERR_HW_FATAL; + break; + case HFI_ERR_SYS_NOC_ERROR: + vidc_err = VIDC_ERR_NOC_ERROR; + break; + case HFI_ERR_SYS_VERSION_MISMATCH: + case HFI_ERR_SYS_INVALID_PARAMETER: + case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE: + case HFI_ERR_SESSION_INVALID_PARAMETER: + case HFI_ERR_SESSION_INVALID_SESSION_ID: + case HFI_ERR_SESSION_INVALID_STREAM_ID: + vidc_err = VIDC_ERR_BAD_PARAM; + break; + case HFI_ERR_SYS_INSUFFICIENT_RESOURCES: + case HFI_ERR_SYS_UNSUPPORTED_DOMAIN: + case HFI_ERR_SYS_UNSUPPORTED_CODEC: + case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY: + case HFI_ERR_SESSION_UNSUPPORTED_SETTING: + case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES: + case HFI_ERR_SESSION_UNSUPPORTED_STREAM: + vidc_err = VIDC_ERR_NOT_SUPPORTED; + break; + case 
HFI_ERR_SYS_MAX_SESSIONS_REACHED: + vidc_err = VIDC_ERR_MAX_CLIENTS; + break; + case HFI_ERR_SYS_SESSION_IN_USE: + vidc_err = VIDC_ERR_CLIENT_PRESENT; + break; + case HFI_ERR_SESSION_FATAL: + vidc_err = VIDC_ERR_CLIENT_FATAL; + break; + case HFI_ERR_SESSION_BAD_POINTER: + vidc_err = VIDC_ERR_BAD_PARAM; + break; + case HFI_ERR_SESSION_INCORRECT_STATE_OPERATION: + vidc_err = VIDC_ERR_BAD_STATE; + break; + case HFI_ERR_SESSION_STREAM_CORRUPT: + case HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED: + vidc_err = VIDC_ERR_BITSTREAM_ERR; + break; + case HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED: + vidc_err = VIDC_ERR_IFRAME_EXPECTED; + break; + case HFI_ERR_SESSION_START_CODE_NOT_FOUND: + vidc_err = VIDC_ERR_START_CODE_NOT_FOUND; + break; + case HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING: + default: + vidc_err = VIDC_ERR_FAIL; + break; + } + return vidc_err; +} + +static int get_hal_pixel_depth(u32 hfi_bit_depth, u32 sid) +{ + switch (hfi_bit_depth) { + case HFI_BITDEPTH_8: return MSM_VIDC_BIT_DEPTH_8; + case HFI_BITDEPTH_9: + case HFI_BITDEPTH_10: return MSM_VIDC_BIT_DEPTH_10; + } + s_vpr_e(sid, "Unsupported bit depth: %d\n", hfi_bit_depth); + return MSM_VIDC_BIT_DEPTH_UNSUPPORTED; +} + +static inline int validate_pkt_size(u32 rem_size, u32 msg_size) +{ + if (rem_size < msg_size) { + d_vpr_e("%s: bad_packet_size: %d\n", __func__, rem_size); + return false; + } + return true; +} + +static int hfi_process_sess_evt_seq_changed(u32 device_id, + struct hfi_msg_event_notify_packet *pkt, + struct msm_vidc_cb_info *info) +{ + struct msm_vidc_cb_event event_notify = {0}; + u32 num_properties_changed; + struct hfi_frame_size *frame_sz; + struct hfi_profile_level *profile_level; + struct hfi_bit_depth *pixel_depth; + struct hfi_pic_struct *pic_struct; + struct hfi_dpb_counts *dpb_counts; + u32 rem_size,entropy_mode = 0; + u8 *data_ptr; + int prop_id; + int luma_bit_depth, chroma_bit_depth; + struct hfi_colour_space *colour_info; + u32 sid; + + if (!validate_pkt_size(pkt->size, + 
sizeof(struct hfi_msg_event_notify_packet))) + return -E2BIG; + + sid = pkt->sid; + event_notify.device_id = device_id; + event_notify.inst_id = (void *)(uintptr_t)pkt->sid; + event_notify.status = VIDC_ERR_NONE; + num_properties_changed = pkt->event_data2; + switch (pkt->event_data1) { + case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES: + event_notify.hal_event_type = + HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES; + break; + case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES: + event_notify.hal_event_type = + HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES; + break; + default: + break; + } + + if (num_properties_changed) { + data_ptr = (u8 *) &pkt->rg_ext_event_data[0]; + rem_size = pkt->size - sizeof(struct + hfi_msg_event_notify_packet) + sizeof(u32); + do { + if (!validate_pkt_size(rem_size, sizeof(u32))) + return -E2BIG; + prop_id = (int) *((u32 *)data_ptr); + rem_size -= sizeof(u32); + switch (prop_id) { + case HFI_PROPERTY_PARAM_FRAME_SIZE: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_frame_size))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + frame_sz = + (struct hfi_frame_size *) data_ptr; + event_notify.width = frame_sz->width; + event_notify.height = frame_sz->height; + s_vpr_hp(sid, "height: %d width: %d\n", + frame_sz->height, frame_sz->width); + data_ptr += + sizeof(struct hfi_frame_size); + rem_size -= sizeof(struct hfi_frame_size); + break; + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_profile_level))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + profile_level = + (struct hfi_profile_level *) data_ptr; + event_notify.profile = profile_level->profile; + event_notify.level = profile_level->level; + s_vpr_hp(sid, "profile: %d level: %d\n", + profile_level->profile, + profile_level->level); + data_ptr += + sizeof(struct hfi_profile_level); + rem_size -= sizeof(struct hfi_profile_level); + break; + case 
HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_bit_depth))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + pixel_depth = (struct hfi_bit_depth *) data_ptr; + /* + * Luma and chroma can have different bitdepths. + * Driver should rely on luma and chroma + * bitdepth for determining output bitdepth + * type. + * + * pixel_depth->bitdepth will include luma + * bitdepth info in bits 0..15 and chroma + * bitdept in bits 16..31. + */ + luma_bit_depth = get_hal_pixel_depth( + pixel_depth->bit_depth & + GENMASK(15, 0), sid); + chroma_bit_depth = get_hal_pixel_depth( + (pixel_depth->bit_depth & + GENMASK(31, 16)) >> 16, sid); + if (luma_bit_depth == MSM_VIDC_BIT_DEPTH_10 || + chroma_bit_depth == + MSM_VIDC_BIT_DEPTH_10) + event_notify.bit_depth = + MSM_VIDC_BIT_DEPTH_10; + else + event_notify.bit_depth = luma_bit_depth; + s_vpr_hp(sid, + "bitdepth(%d), luma_bit_depth(%d), chroma_bit_depth(%d)\n", + event_notify.bit_depth, luma_bit_depth, + chroma_bit_depth); + data_ptr += sizeof(struct hfi_bit_depth); + rem_size -= sizeof(struct hfi_bit_depth); + break; + case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_pic_struct))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + pic_struct = (struct hfi_pic_struct *) data_ptr; + event_notify.pic_struct = + pic_struct->progressive_only; + s_vpr_hp(sid, "Progressive only flag: %d\n", + pic_struct->progressive_only); + data_ptr += + sizeof(struct hfi_pic_struct); + rem_size -= sizeof(struct hfi_pic_struct); + break; + case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_colour_space))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + colour_info = + (struct hfi_colour_space *) data_ptr; + event_notify.colour_space = + colour_info->colour_space; + s_vpr_h(sid, "Colour space value is: %d\n", + colour_info->colour_space); + data_ptr += + sizeof(struct hfi_colour_space); + rem_size -= 
sizeof(struct hfi_colour_space); + break; + case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: + if (!validate_pkt_size(rem_size, sizeof(u32))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + entropy_mode = *(u32 *)data_ptr; + event_notify.entropy_mode = entropy_mode; + s_vpr_hp(sid, "Entropy Mode: 0x%x\n", + entropy_mode); + data_ptr += + sizeof(u32); + rem_size -= sizeof(u32); + break; + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_buffer_requirements))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + data_ptr += + sizeof(struct hfi_buffer_requirements); + rem_size -= + sizeof(struct hfi_buffer_requirements); + break; + case HFI_INDEX_EXTRADATA_INPUT_CROP: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_index_extradata_input_crop_payload))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + data_ptr += + sizeof(struct + hfi_index_extradata_input_crop_payload); + rem_size -= sizeof(struct + hfi_index_extradata_input_crop_payload); + break; + case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS: + if (!validate_pkt_size(rem_size, sizeof(struct + hfi_dpb_counts))) + return -E2BIG; + data_ptr = data_ptr + sizeof(u32); + dpb_counts = (struct hfi_dpb_counts *) data_ptr; + event_notify.max_dpb_count = + dpb_counts->max_dpb_count; + event_notify.max_ref_frames = + dpb_counts->max_ref_frames; + event_notify.max_dec_buffering = + dpb_counts->max_dec_buffering; + event_notify.max_reorder_frames = + dpb_counts->max_reorder_frames; + event_notify.fw_min_cnt = + dpb_counts->fw_min_cnt; + s_vpr_h(sid, + "FW DPB counts: dpb %d ref %d buff %d reorder %d fw_min_cnt %d\n", + dpb_counts->max_dpb_count, + dpb_counts->max_ref_frames, + dpb_counts->max_dec_buffering, + dpb_counts->max_reorder_frames, + dpb_counts->fw_min_cnt); + data_ptr += + sizeof(struct hfi_dpb_counts); + rem_size -= sizeof(struct hfi_dpb_counts); + break; + default: + s_vpr_e(sid, "%s: cmd: %#x not supported\n", + __func__, prop_id); + break; + } + 
num_properties_changed--; + } while (num_properties_changed > 0); + } + + info->response_type = HAL_SESSION_EVENT_CHANGE; + info->response.event = event_notify; + + return 0; +} + +static int hfi_process_evt_release_buffer_ref(u32 device_id, + struct hfi_msg_event_notify_packet *pkt, + struct msm_vidc_cb_info *info) +{ + struct msm_vidc_cb_event event_notify = {0}; + struct hfi_msg_release_buffer_ref_event_packet *data; + + if (sizeof(struct hfi_msg_event_notify_packet) + > pkt->size) { + d_vpr_e("%s: bad_pkt_size\n", __func__); + return -E2BIG; + } + if (pkt->size < sizeof(struct hfi_msg_event_notify_packet) - sizeof(u32) + + sizeof(struct hfi_msg_release_buffer_ref_event_packet)) { + d_vpr_e("%s: bad_pkt_size: %d\n", __func__, pkt->size); + return -E2BIG; + } + + data = (struct hfi_msg_release_buffer_ref_event_packet *) + pkt->rg_ext_event_data; + s_vpr_l(pkt->sid, + "RECEIVED: EVENT_NOTIFY - release_buffer_reference\n"); + + event_notify.device_id = device_id; + event_notify.inst_id = (void *)(uintptr_t)pkt->sid; + event_notify.status = VIDC_ERR_NONE; + event_notify.hal_event_type = HAL_EVENT_RELEASE_BUFFER_REFERENCE; + event_notify.packet_buffer = data->packet_buffer; + event_notify.extra_data_buffer = data->extra_data_buffer; + + info->response_type = HAL_SESSION_EVENT_CHANGE; + info->response.event = event_notify; + + return 0; +} + +static int hfi_process_sys_error(u32 device_id, + struct hfi_msg_event_notify_packet *pkt, + struct msm_vidc_cb_info *info) +{ + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + cmd_done.device_id = device_id; + cmd_done.status = hfi_map_err_status(pkt->event_data1); + + info->response_type = HAL_SYS_ERROR; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_error(u32 device_id, + struct hfi_msg_event_notify_packet *pkt, + struct msm_vidc_cb_info *info) +{ + struct msm_vidc_cb_cmd_done cmd_done = {0}; + u32 sid = pkt->sid; + + cmd_done.device_id = device_id; + cmd_done.inst_id = (void 
*)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->event_data1); + info->response.cmd = cmd_done; + s_vpr_h(sid, "RECEIVED: SESSION_ERROR with event id : %#x %#x\n", + pkt->event_data1, pkt->event_data2); + switch (pkt->event_data1) { + /* Ignore below errors */ + case HFI_ERR_SESSION_INVALID_SCALE_FACTOR: + case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED: + s_vpr_h(sid, "Non Fatal: HFI_EVENT_SESSION_ERROR\n"); + info->response_type = HAL_RESPONSE_UNUSED; + break; + default: + s_vpr_e(sid, "%s: data1 %#x, data2 %#x\n", __func__, + pkt->event_data1, pkt->event_data2); + info->response_type = HAL_SESSION_ERROR; + break; + } + + return 0; +} + +static int hfi_process_event_notify(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_event_notify_packet *pkt = _pkt; + + if (pkt->size < sizeof(struct hfi_msg_event_notify_packet)) { + d_vpr_e("%s: invalid params %u %u\n", __func__, + pkt->size, sizeof(struct hfi_msg_event_notify_packet)); + return -E2BIG; + } + + s_vpr_l(pkt->sid, "RECEIVED: EVENT_NOTIFY\n"); + + switch (pkt->event_id) { + case HFI_EVENT_SYS_ERROR: + s_vpr_e(pkt->sid, "HFI_EVENT_SYS_ERROR: %d, %#x\n", + pkt->event_data1, pkt->event_data2); + return hfi_process_sys_error(device_id, pkt, info); + case HFI_EVENT_SESSION_ERROR: + s_vpr_h(pkt->sid, "HFI_EVENT_SESSION_ERROR\n"); + return hfi_process_session_error(device_id, pkt, info); + + case HFI_EVENT_SESSION_SEQUENCE_CHANGED: + s_vpr_h(pkt->sid, "HFI_EVENT_SESSION_SEQUENCE_CHANGED\n"); + return hfi_process_sess_evt_seq_changed(device_id, pkt, info); + + case HFI_EVENT_RELEASE_BUFFER_REFERENCE: + s_vpr_l(pkt->sid, "HFI_EVENT_RELEASE_BUFFER_REFERENCE\n"); + return hfi_process_evt_release_buffer_ref(device_id, pkt, info); + + case HFI_EVENT_SESSION_PROPERTY_CHANGED: + default: + *info = (struct msm_vidc_cb_info) { + .response_type = HAL_RESPONSE_UNUSED, + }; + + return 0; + } +} + +static int hfi_process_sys_init_done(u32 device_id, + void *_pkt, + struct 
msm_vidc_cb_info *info) +{ + struct hfi_msg_sys_init_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + enum vidc_status status = VIDC_ERR_NONE; + + if (sizeof(struct hfi_msg_sys_init_done_packet) > pkt->size) { + d_vpr_e("%s: bad_pkt_size: %d\n", __func__, + pkt->size); + return -E2BIG; + } + d_vpr_h("RECEIVED: SYS_INIT_DONE\n"); + + status = hfi_map_err_status(pkt->error_type); + if (status) + d_vpr_e("%s: status %#x\n", __func__, status); + + cmd_done.device_id = device_id; + cmd_done.inst_id = NULL; + cmd_done.status = (u32)status; + cmd_done.size = sizeof(struct vidc_hal_sys_init_done); + + info->response_type = HAL_SYS_INIT_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_sys_rel_resource_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_sys_release_resource_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + enum vidc_status status = VIDC_ERR_NONE; + u32 pkt_size; + + pkt_size = sizeof(struct hfi_msg_sys_release_resource_done_packet); + if (pkt_size > pkt->size) { + d_vpr_e("hal_process_sys_rel_resource_done: bad size: %d\n", + pkt->size); + return -E2BIG; + } + d_vpr_h("RECEIVED: SYS_RELEASE_RESOURCE_DONE\n"); + + status = hfi_map_err_status(pkt->error_type); + cmd_done.device_id = device_id; + cmd_done.inst_id = NULL; + cmd_done.status = (u32) status; + cmd_done.size = 0; + + info->response_type = HAL_SYS_RELEASE_RESOURCE_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static void copy_hfi_to_hal_buf_req(struct hal_buffer_requirements *dst, + struct hfi_buffer_requirements *src) { + dst->buffer_size = src->buffer_size; + dst->buffer_count_min = (u16)src->buffer_count_min; + dst->buffer_count_min_host = (u16)src->buffer_count_min_host; + dst->buffer_count_actual = (u16)src->buffer_count_actual; + dst->buffer_alignment = (u16)src->buffer_alignment; +} + +static void hfi_process_sess_get_prop_buf_req( + struct 
hfi_msg_session_property_info_packet *prop, + struct buffer_requirements *buffreq, u32 sid) +{ + struct hfi_buffer_requirements *hfi_buf_req; + struct hal_buffer_requirements *hal_buf_req; + u32 req_bytes; + + if (!prop) { + s_vpr_e(sid, "%s: bad_prop: %pK\n", __func__, prop); + return; + } + + req_bytes = prop->size - sizeof( + struct hfi_msg_session_property_info_packet); + if (!req_bytes || req_bytes % sizeof(struct hfi_buffer_requirements) || + !prop->rg_property_data[1]) { + s_vpr_e(sid, "%s: bad_pkt: %d\n", __func__, req_bytes); + return; + } + + hfi_buf_req = (struct hfi_buffer_requirements *) + &prop->rg_property_data[1]; + + if (!hfi_buf_req) { + s_vpr_e(sid, "%s: invalid buffer req pointer\n", __func__); + return; + } + + while (req_bytes) { + s_vpr_h(sid, "got buffer requirements for: %d\n", + hfi_buf_req->buffer_type); + switch (hfi_buf_req->buffer_type) { + case HFI_BUFFER_INPUT: + hal_buf_req = &buffreq->buffer[0]; + hal_buf_req->buffer_type = HAL_BUFFER_INPUT; + break; + case HFI_BUFFER_OUTPUT: + hal_buf_req = &buffreq->buffer[1]; + hal_buf_req->buffer_type = HAL_BUFFER_OUTPUT; + break; + case HFI_BUFFER_OUTPUT2: + hal_buf_req = &buffreq->buffer[2]; + hal_buf_req->buffer_type = HAL_BUFFER_OUTPUT2; + break; + case HFI_BUFFER_EXTRADATA_INPUT: + hal_buf_req = &buffreq->buffer[3]; + hal_buf_req->buffer_type = + HAL_BUFFER_EXTRADATA_INPUT; + break; + case HFI_BUFFER_EXTRADATA_OUTPUT: + hal_buf_req = &buffreq->buffer[4]; + hal_buf_req->buffer_type = + HAL_BUFFER_EXTRADATA_OUTPUT; + break; + case HFI_BUFFER_EXTRADATA_OUTPUT2: + hal_buf_req = &buffreq->buffer[5]; + hal_buf_req->buffer_type = + HAL_BUFFER_EXTRADATA_OUTPUT2; + break; + case HFI_BUFFER_COMMON_INTERNAL_SCRATCH: + hal_buf_req = &buffreq->buffer[6]; + hal_buf_req->buffer_type = + HAL_BUFFER_INTERNAL_SCRATCH; + break; + case HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1: + hal_buf_req = &buffreq->buffer[7]; + hal_buf_req->buffer_type = + HAL_BUFFER_INTERNAL_SCRATCH_1; + break; + case 
HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2: + hal_buf_req = &buffreq->buffer[8]; + hal_buf_req->buffer_type = + HAL_BUFFER_INTERNAL_SCRATCH_2; + break; + case HFI_BUFFER_INTERNAL_PERSIST: + hal_buf_req = &buffreq->buffer[9]; + hal_buf_req->buffer_type = + HAL_BUFFER_INTERNAL_PERSIST; + break; + case HFI_BUFFER_INTERNAL_PERSIST_1: + hal_buf_req = &buffreq->buffer[10]; + hal_buf_req->buffer_type = + HAL_BUFFER_INTERNAL_PERSIST_1; + break; + case HFI_BUFFER_COMMON_INTERNAL_RECON: + hal_buf_req = &buffreq->buffer[11]; + hal_buf_req->buffer_type = + HAL_BUFFER_INTERNAL_RECON; + break; + default: + hal_buf_req = NULL; + s_vpr_e(sid, "%s: bad_buffer_type: %d\n", + __func__, hfi_buf_req->buffer_type); + break; + } + if (hal_buf_req) + copy_hfi_to_hal_buf_req(hal_buf_req, hfi_buf_req); + req_bytes -= sizeof(struct hfi_buffer_requirements); + hfi_buf_req++; + } +} + +static int hfi_process_session_prop_info(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_property_info_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + struct buffer_requirements buff_req = { { {0} } }; + + if (pkt->size < sizeof(struct hfi_msg_session_property_info_packet)) { + d_vpr_e("hal_process_session_prop_info: bad_pkt_size\n"); + return -E2BIG; + } else if (!pkt->num_properties) { + d_vpr_e("hal_process_session_prop_info: no_properties\n"); + return -EINVAL; + } + s_vpr_h(pkt->sid, "Received SESSION_PROPERTY_INFO\n"); + + switch (pkt->rg_property_data[0]) { + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + hfi_process_sess_get_prop_buf_req(pkt, &buff_req, pkt->sid); + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = VIDC_ERR_NONE; + cmd_done.data.property.buf_req = buff_req; + cmd_done.size = sizeof(buff_req); + + info->response_type = HAL_SESSION_PROPERTY_INFO; + info->response.cmd = cmd_done; + + return 0; + default: + s_vpr_h(pkt->sid, "%s: unknown_prop_id: %x\n", + __func__, 
pkt->rg_property_data[0]); + return -ENOTSUPP; + } +} + +static int hfi_process_session_init_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_sys_session_init_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (sizeof(struct hfi_msg_sys_session_init_done_packet) > pkt->size) { + d_vpr_e("hal_process_session_init_done: bad_pkt_size\n"); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_INIT_DONE\n"); + + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + + info->response_type = HAL_SESSION_INIT_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_load_res_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_load_resources_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (sizeof(struct hfi_msg_session_load_resources_done_packet) != + pkt->size) { + d_vpr_e("%s: bad packet size: %d\n", __func__, pkt->size); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_LOAD_RESOURCES_DONE\n"); + + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + cmd_done.size = 0; + + info->response_type = HAL_SESSION_LOAD_RESOURCE_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_flush_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_flush_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (sizeof(struct hfi_msg_session_flush_done_packet) != pkt->size) { + d_vpr_e("hal_process_session_flush_done: bad packet size: %d\n", + pkt->size); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_FLUSH_DONE\n"); + + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = 
hfi_map_err_status(pkt->error_type); + cmd_done.size = sizeof(u32); + + switch (pkt->flush_type) { + case HFI_FLUSH_OUTPUT: + cmd_done.data.flush_type = HAL_FLUSH_OUTPUT; + break; + case HFI_FLUSH_INPUT: + cmd_done.data.flush_type = HAL_FLUSH_INPUT; + break; + case HFI_FLUSH_ALL: + cmd_done.data.flush_type = HAL_FLUSH_ALL; + break; + default: + s_vpr_e(pkt->sid, "%s: invalid flush type!", __func__); + return -EINVAL; + } + + info->response_type = HAL_SESSION_FLUSH_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_etb_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_empty_buffer_done_packet *pkt = _pkt; + struct msm_vidc_cb_data_done data_done = {0}; + + if (!pkt || pkt->size < + sizeof(struct hfi_msg_session_empty_buffer_done_packet)) + goto bad_packet_size; + + s_vpr_l(pkt->sid, "RECEIVED: SESSION_ETB_DONE\n"); + + data_done.device_id = device_id; + data_done.inst_id = (void *)(uintptr_t)pkt->sid; + data_done.status = hfi_map_err_status(pkt->error_type); + data_done.size = sizeof(struct msm_vidc_cb_data_done); + data_done.input_done.input_tag = pkt->input_tag; + data_done.input_done.recon_stats.buffer_index = + pkt->ubwc_cr_stats.frame_index; + memcpy(&data_done.input_done.recon_stats.ubwc_stats_info, + &pkt->ubwc_cr_stats.ubwc_stats_info, + sizeof(data_done.input_done.recon_stats.ubwc_stats_info)); + data_done.input_done.recon_stats.complexity_number = + pkt->ubwc_cr_stats.complexity_number; + data_done.input_done.offset = pkt->offset; + data_done.input_done.filled_len = pkt->filled_len; + data_done.input_done.flags = pkt->flags; + data_done.input_done.packet_buffer = pkt->packet_buffer; + data_done.input_done.extra_data_buffer = pkt->extra_data_buffer; + data_done.input_done.status = + hfi_map_err_status(pkt->error_type); + + trace_msm_v4l2_vidc_buffer_event_end("ETB", + (u32)pkt->packet_buffer, -1, -1, + pkt->filled_len, pkt->offset); + + info->response_type = 
HAL_SESSION_ETB_DONE; + info->response.data = data_done; + + return 0; +bad_packet_size: + d_vpr_e("%s: ebd - bad_pkt_size: %d\n", + __func__, pkt ? pkt->size : 0); + return -E2BIG; +} + +static int hfi_process_session_ftb_done( + u32 device_id, void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct vidc_hal_msg_pkt_hdr *msg_hdr = _pkt; + struct msm_vidc_cb_data_done data_done = {0}; + u32 struct_size = 0; + + bool is_decoder = false, is_encoder = false; + + if (!msg_hdr) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + struct_size = sizeof(struct + hfi_msg_session_fill_buffer_done_compressed_packet) + 4; + is_encoder = (msg_hdr->size == struct_size) || + (msg_hdr->size == (struct_size + + sizeof(struct hfi_ubwc_cr_stats) - 4)); + + struct_size = sizeof(struct + hfi_msg_session_fbd_uncompressed_plane0_packet) + 4; + is_decoder = (msg_hdr->size == struct_size) || + (msg_hdr->size == (struct_size + + sizeof(struct hfi_ubwc_cr_stats) - 4)); + + if (!(is_encoder ^ is_decoder)) { + d_vpr_e("Ambiguous packet (%#x) received (size %d)\n", + msg_hdr->packet, msg_hdr->size); + return -EBADHANDLE; + } + + if (is_encoder) { + struct hfi_msg_session_fill_buffer_done_compressed_packet *pkt = + (struct hfi_msg_session_fill_buffer_done_compressed_packet *) + msg_hdr; + if (sizeof(struct + hfi_msg_session_fill_buffer_done_compressed_packet) + > pkt->size) { + d_vpr_e("hal_process_session_ftb_done: bad_pkt_size\n"); + return -E2BIG; + } else if (pkt->error_type != HFI_ERR_NONE) { + s_vpr_e(pkt->sid, "got buffer back with error %x\n", + pkt->error_type); + /* Proceed with the FBD */ + } + s_vpr_l(pkt->sid, "RECEIVED: SESSION_FTB_DONE\n"); + + data_done.device_id = device_id; + data_done.inst_id = (void *)(uintptr_t)pkt->sid; + data_done.status = hfi_map_err_status(pkt->error_type); + data_done.size = sizeof(struct msm_vidc_cb_data_done); + + data_done.output_done.input_tag = pkt->input_tag; + data_done.output_done.timestamp_hi = pkt->time_stamp_hi; + 
data_done.output_done.timestamp_lo = pkt->time_stamp_lo; + data_done.output_done.flags1 = pkt->flags; + data_done.output_done.stats = pkt->stats; + data_done.output_done.offset1 = pkt->offset; + data_done.output_done.alloc_len1 = pkt->alloc_len; + data_done.output_done.filled_len1 = pkt->filled_len; + data_done.output_done.picture_type = pkt->picture_type; + data_done.output_done.packet_buffer1 = pkt->packet_buffer; + data_done.output_done.extra_data_buffer = + pkt->extra_data_buffer; + data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT; + /* FBD packet is extended only when stats=1. */ + if (pkt->stats == 1) { + struct hfi_ubwc_cr_stats *ubwc_stat = + (struct hfi_ubwc_cr_stats *)pkt->rgData; + data_done.output_done.ubwc_cr_stat.is_valid = + ubwc_stat->is_valid; + data_done.output_done.ubwc_cr_stat.worst_cr = + ubwc_stat->worst_compression_ratio; + data_done.output_done.ubwc_cr_stat.worst_cf = + ubwc_stat->worst_complexity_number; + } + } else /* if (is_decoder) */ { + struct hfi_msg_session_fbd_uncompressed_plane0_packet *pkt = + (struct hfi_msg_session_fbd_uncompressed_plane0_packet *) + msg_hdr; + if (sizeof( + struct hfi_msg_session_fbd_uncompressed_plane0_packet) > + pkt->size) { + d_vpr_e("hal_process_session_ftb_done: bad_pkt_size\n"); + return -E2BIG; + } + s_vpr_l(pkt->sid, "RECEIVED: SESSION_FTB_DONE\n"); + + data_done.device_id = device_id; + data_done.inst_id = (void *)(uintptr_t)pkt->sid; + data_done.status = hfi_map_err_status(pkt->error_type); + data_done.size = sizeof(struct msm_vidc_cb_data_done); + + data_done.output_done.stream_id = pkt->stream_id; + data_done.output_done.view_id = pkt->view_id; + data_done.output_done.timestamp_hi = pkt->time_stamp_hi; + data_done.output_done.timestamp_lo = pkt->time_stamp_lo; + data_done.output_done.flags1 = pkt->flags; + data_done.output_done.stats = pkt->stats; + data_done.output_done.alloc_len1 = pkt->alloc_len; + data_done.output_done.filled_len1 = pkt->filled_len; + data_done.output_done.offset1 = 
pkt->offset; + data_done.output_done.frame_width = pkt->frame_width; + data_done.output_done.frame_height = pkt->frame_height; + data_done.output_done.start_x_coord = pkt->start_x_coord; + data_done.output_done.start_y_coord = pkt->start_y_coord; + data_done.output_done.input_tag = pkt->input_tag; + data_done.output_done.input_tag2 = pkt->input_tag2; + data_done.output_done.picture_type = pkt->picture_type; + data_done.output_done.packet_buffer1 = pkt->packet_buffer; + data_done.output_done.extra_data_buffer = + pkt->extra_data_buffer; + + /* FBD packet is extended only when view_id=1. */ + if (pkt->view_id == 1) { + struct hfi_ubwc_cr_stats *ubwc_stat = + (struct hfi_ubwc_cr_stats *)pkt->rgData; + data_done.output_done.ubwc_cr_stat.is_valid = + ubwc_stat->is_valid; + data_done.output_done.ubwc_cr_stat.worst_cr = + ubwc_stat->worst_compression_ratio; + data_done.output_done.ubwc_cr_stat.worst_cf = + ubwc_stat->worst_complexity_number; + } + + if (!pkt->stream_id) + data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT; + else if (pkt->stream_id == 1) + data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT2; + } + + trace_msm_v4l2_vidc_buffer_event_end("FTB", + (u32)data_done.output_done.packet_buffer1, + (((u64)data_done.output_done.timestamp_hi) << 32) + + ((u64)data_done.output_done.timestamp_lo), + data_done.output_done.alloc_len1, + data_done.output_done.filled_len1, + data_done.output_done.offset1); + + info->response_type = HAL_SESSION_FTB_DONE; + info->response.data = data_done; + + return 0; +} + +static int hfi_process_session_start_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_start_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (!pkt || pkt->size != + sizeof(struct hfi_msg_session_start_done_packet)) { + d_vpr_e("%s: bad packet/packet size\n", __func__); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_START_DONE\n"); + + cmd_done.device_id = device_id; + 
cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + cmd_done.size = 0; + + info->response_type = HAL_SESSION_START_DONE; + info->response.cmd = cmd_done; + return 0; +} + +static int hfi_process_session_stop_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_stop_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (!pkt || pkt->size != + sizeof(struct hfi_msg_session_stop_done_packet)) { + d_vpr_e("%s: bad packet/packet size\n", __func__); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_STOP_DONE\n"); + + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + cmd_done.size = 0; + + info->response_type = HAL_SESSION_STOP_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_rel_res_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_release_resources_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (!pkt || pkt->size != + sizeof(struct hfi_msg_session_release_resources_done_packet)) { + d_vpr_e("%s: bad packet/packet size\n", __func__); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_RELEASE_RESOURCES_DONE\n"); + + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + cmd_done.size = 0; + + info->response_type = HAL_SESSION_RELEASE_RESOURCE_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_rel_buf_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_session_release_buffers_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (!pkt || pkt->size < + sizeof(struct hfi_msg_session_release_buffers_done_packet)) { + d_vpr_e("bad packet/packet size 
%d\n", + pkt ? pkt->size : 0); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED:SESSION_RELEASE_BUFFER_DONE\n"); + + cmd_done.device_id = device_id; + cmd_done.size = sizeof(struct msm_vidc_cb_cmd_done); + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + cmd_done.data.buffer_info.buffer_addr = *pkt->rg_buffer_info; + cmd_done.size = sizeof(struct hal_buffer_info); + + info->response_type = HAL_SESSION_RELEASE_BUFFER_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_end_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_sys_session_end_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (!pkt || pkt->size != + sizeof(struct hfi_msg_sys_session_end_done_packet)) { + d_vpr_e("%s: bad packet/packet size\n", __func__); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_END_DONE\n"); + + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + cmd_done.size = 0; + + info->response_type = HAL_SESSION_END_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_session_abort_done(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_sys_session_abort_done_packet *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + + if (!pkt || pkt->size != + sizeof(struct hfi_msg_sys_session_abort_done_packet)) { + d_vpr_e("%s: bad packet/packet size: %d\n", + __func__, pkt ? 
pkt->size : 0); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SESSION_ABORT_DONE\n"); + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.status = hfi_map_err_status(pkt->error_type); + cmd_done.size = 0; + + info->response_type = HAL_SESSION_ABORT_DONE; + info->response.cmd = cmd_done; + + return 0; +} + +static int hfi_process_sys_ping_ack(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_sys_ping_ack_pkt *pkt = _pkt; + struct msm_vidc_cb_cmd_done cmd_done = {0}; + if (!pkt || pkt->size != + sizeof(struct hfi_msg_sys_ping_ack_pkt)) { + d_vpr_e("%s: bad packet/packet size: %d\n", + __func__, pkt ? pkt->size : 0); + return -E2BIG; + } + s_vpr_h(pkt->sid, "RECEIVED: SYS PING ACK\n"); + cmd_done.device_id = device_id; + cmd_done.inst_id = (void *)(uintptr_t)pkt->sid; + cmd_done.size = 0; + + info->response_type = HAL_SYS_PING_ACK; + info->response.cmd = cmd_done; + + return 0; +} + +static void hfi_process_sys_get_prop_image_version( + struct hfi_msg_sys_property_info_packet *pkt) +{ + u32 i = 0; + size_t smem_block_size = 0; + u8 *smem_table_ptr; + char version[256]; + const u32 version_string_size = 128; + const u32 smem_image_index_venus = 14 * 128; + u8 *str_image_version; + u32 req_bytes; + + req_bytes = pkt->size - sizeof(*pkt); + if (req_bytes < version_string_size || + !pkt->rg_property_data[1] || + pkt->num_properties > 1) { + d_vpr_e("%s: bad_pkt: %d\n", __func__, req_bytes); + return; + } + str_image_version = (u8 *)&pkt->rg_property_data[1]; + /* + * The version string returned by firmware includes null + * characters at the start and in between. Replace the null + * characters with space, to print the version info. 
+ */ + for (i = 0; i < version_string_size; i++) { + if (str_image_version[i] != '\0') + version[i] = str_image_version[i]; + else + version[i] = ' '; + } + version[i] = '\0'; + d_vpr_h("F/W version: %s\n", version); + + smem_table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, + SMEM_IMAGE_VERSION_TABLE, &smem_block_size); + if ((smem_image_index_venus + version_string_size) <= smem_block_size && + smem_table_ptr) + memcpy(smem_table_ptr + smem_image_index_venus, + str_image_version, version_string_size); +} + +static int hfi_process_sys_property_info(u32 device_id, + void *_pkt, + struct msm_vidc_cb_info *info) +{ + struct hfi_msg_sys_property_info_packet *pkt = _pkt; + if (!pkt) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } else if (pkt->size < sizeof(*pkt)) { + d_vpr_e("%s: bad_pkt_size\n", __func__); + return -E2BIG; + } else if (!pkt->num_properties) { + d_vpr_e("%s: no_properties\n", __func__); + return -EINVAL; + } + + switch (pkt->rg_property_data[0]) { + case HFI_PROPERTY_SYS_IMAGE_VERSION: + hfi_process_sys_get_prop_image_version(pkt); + + *info = (struct msm_vidc_cb_info) { + .response_type = HAL_RESPONSE_UNUSED, + }; + return 0; + default: + d_vpr_h("%s: unknown_prop_id: %x\n", + __func__, pkt->rg_property_data[0]); + return -ENOTSUPP; + } + +} + +int hfi_process_msg_packet(u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr, + struct msm_vidc_cb_info *info) +{ + typedef int (*pkt_func_def)(u32, void *, struct msm_vidc_cb_info *info); + pkt_func_def pkt_func = NULL; + + if (!info || !msg_hdr || msg_hdr->size < VIDC_IFACEQ_MIN_PKT_SIZE) { + d_vpr_e("%s: bad packet/packet size\n", __func__); + return -EINVAL; + } + + switch (msg_hdr->packet) { + case HFI_MSG_EVENT_NOTIFY: + pkt_func = (pkt_func_def)hfi_process_event_notify; + break; + case HFI_MSG_SYS_INIT_DONE: + pkt_func = (pkt_func_def)hfi_process_sys_init_done; + break; + case HFI_MSG_SYS_SESSION_INIT_DONE: + pkt_func = (pkt_func_def)hfi_process_session_init_done; + break; + case 
HFI_MSG_SYS_PROPERTY_INFO: + pkt_func = (pkt_func_def)hfi_process_sys_property_info; + break; + case HFI_MSG_SYS_SESSION_END_DONE: + pkt_func = (pkt_func_def)hfi_process_session_end_done; + break; + case HFI_MSG_SESSION_LOAD_RESOURCES_DONE: + pkt_func = (pkt_func_def)hfi_process_session_load_res_done; + break; + case HFI_MSG_SESSION_START_DONE: + pkt_func = (pkt_func_def)hfi_process_session_start_done; + break; + case HFI_MSG_SESSION_STOP_DONE: + pkt_func = (pkt_func_def)hfi_process_session_stop_done; + break; + case HFI_MSG_SESSION_EMPTY_BUFFER_DONE: + pkt_func = (pkt_func_def)hfi_process_session_etb_done; + break; + case HFI_MSG_SESSION_FILL_BUFFER_DONE: + pkt_func = (pkt_func_def)hfi_process_session_ftb_done; + break; + case HFI_MSG_SESSION_FLUSH_DONE: + pkt_func = (pkt_func_def)hfi_process_session_flush_done; + break; + case HFI_MSG_SESSION_PROPERTY_INFO: + pkt_func = (pkt_func_def)hfi_process_session_prop_info; + break; + case HFI_MSG_SESSION_RELEASE_RESOURCES_DONE: + pkt_func = (pkt_func_def)hfi_process_session_rel_res_done; + break; + case HFI_MSG_SYS_RELEASE_RESOURCE: + pkt_func = (pkt_func_def)hfi_process_sys_rel_resource_done; + break; + case HFI_MSG_SESSION_RELEASE_BUFFERS_DONE: + pkt_func = (pkt_func_def)hfi_process_session_rel_buf_done; + break; + case HFI_MSG_SYS_SESSION_ABORT_DONE: + pkt_func = (pkt_func_def)hfi_process_session_abort_done; + break; + case HFI_MSG_SYS_PING_ACK: + pkt_func = (pkt_func_def)hfi_process_sys_ping_ack; + break; + default: + d_vpr_l("Unable to parse message: %#x\n", msg_hdr->packet); + break; + } + + return pkt_func ? + pkt_func(device_id, (void *)msg_hdr, info) : -ENOTSUPP; +} diff --git a/techpack/video/msm/vidc/msm_smem.c b/techpack/video/msm/vidc/msm_smem.c new file mode 100644 index 000000000000..5dfa1e70a089 --- /dev/null +++ b/techpack/video/msm/vidc/msm_smem.c @@ -0,0 +1,684 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include "msm_vidc.h" +#include "msm_vidc_debug.h" +#include "msm_vidc_resources.h" + +static int msm_dma_get_device_address(struct dma_buf *dbuf, unsigned long align, + dma_addr_t *iova, unsigned long *buffer_size, + unsigned long flags, enum hal_buffer buffer_type, + unsigned long session_type, struct msm_vidc_platform_resources *res, + struct dma_mapping_info *mapping_info, u32 sid) +{ + int rc = 0; + struct dma_buf_attachment *attach; + struct sg_table *table = NULL; + struct context_bank_info *cb = NULL; + + if (!dbuf || !iova || !buffer_size || !mapping_info) { + s_vpr_e(sid, "%s: invalid params: %pK, %pK, %pK, %pK\n", + __func__, dbuf, iova, buffer_size, mapping_info); + return -EINVAL; + } + + if (is_iommu_present(res)) { + cb = msm_smem_get_context_bank( + session_type, (flags & SMEM_SECURE), + res, buffer_type, sid); + if (!cb) { + s_vpr_e(sid, "%s: Failed to get context bank device\n", + __func__); + rc = -EIO; + goto mem_map_failed; + } + + /* Check if the dmabuf size matches expected size */ + if (dbuf->size < *buffer_size) { + rc = -EINVAL; + s_vpr_e(sid, + "Size mismatch: Dmabuf size: %zu Expected Size: %lu", + dbuf->size, *buffer_size); + msm_vidc_res_handle_fatal_hw_error(res, + true); + goto mem_buf_size_mismatch; + } + + /* Prepare a dma buf for dma on the given device */ + attach = dma_buf_attach(dbuf, cb->dev); + if (IS_ERR_OR_NULL(attach)) { + rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM; + s_vpr_e(sid, "Failed to attach dmabuf\n"); + goto mem_buf_attach_failed; + } + + /* + * Get the scatterlist for the given attachment + * Mapping of sg is taken care by map attachment + */ + attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP; + /* + * We do not need dma_map function to perform cache operations + * on the whole buffer size and hence pass skip sync flag. 
+ * We do the required cache operations separately for the + * required buffer size + */ + attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + if (res->sys_cache_present) + attach->dma_map_attrs |= + DMA_ATTR_IOMMU_USE_UPSTREAM_HINT; + + table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(table)) { + rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM; + s_vpr_e(sid, "Failed to map table\n"); + goto mem_map_table_failed; + } + + /* debug trace's need to be updated later */ + trace_msm_smem_buffer_iommu_op_start("MAP", 0, 0, + align, *iova, *buffer_size); + + if (table->sgl) { + *iova = table->sgl->dma_address; + *buffer_size = table->sgl->dma_length; + } else { + s_vpr_e(sid, "sgl is NULL\n"); + rc = -ENOMEM; + goto mem_map_sg_failed; + } + + mapping_info->dev = cb->dev; + mapping_info->domain = cb->domain; + mapping_info->table = table; + mapping_info->attach = attach; + mapping_info->buf = dbuf; + mapping_info->cb_info = (void *)cb; + + trace_msm_smem_buffer_iommu_op_end("MAP", 0, 0, + align, *iova, *buffer_size); + } else { + s_vpr_h(sid, "iommu not present, use phys mem addr\n"); + } + + return 0; +mem_map_sg_failed: + dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL); +mem_map_table_failed: + dma_buf_detach(dbuf, attach); +mem_buf_size_mismatch: +mem_buf_attach_failed: +mem_map_failed: + return rc; +} + +static int msm_dma_put_device_address(u32 flags, + struct dma_mapping_info *mapping_info, + enum hal_buffer buffer_type, u32 sid) +{ + int rc = 0; + struct sg_table *table = NULL; + dma_addr_t iova; + unsigned long buffer_size; + + if (!mapping_info) { + s_vpr_e(sid, "Invalid mapping_info\n"); + return -EINVAL; + } + + if (!mapping_info->dev || !mapping_info->table || + !mapping_info->buf || !mapping_info->attach || + !mapping_info->cb_info) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return -EINVAL; + } + + table = mapping_info->table; + iova = table->sgl->dma_address; + buffer_size = table->sgl->dma_length; + 
trace_msm_smem_buffer_iommu_op_start("UNMAP", 0, 0, + 0, iova, buffer_size); + dma_buf_unmap_attachment(mapping_info->attach, + mapping_info->table, DMA_BIDIRECTIONAL); + dma_buf_detach(mapping_info->buf, mapping_info->attach); + trace_msm_smem_buffer_iommu_op_end("UNMAP", 0, 0, 0, 0, 0); + + mapping_info->dev = NULL; + mapping_info->domain = NULL; + mapping_info->table = NULL; + mapping_info->attach = NULL; + mapping_info->buf = NULL; + mapping_info->cb_info = NULL; + + return rc; +} + +struct dma_buf *msm_smem_get_dma_buf(int fd, u32 sid) +{ + struct dma_buf *dma_buf; + + dma_buf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(dma_buf)) { + s_vpr_e(sid, "Failed to get dma_buf for %d, error %ld\n", + fd, PTR_ERR(dma_buf)); + dma_buf = NULL; + } + + return dma_buf; +} + +void msm_smem_put_dma_buf(void *dma_buf, u32 sid) +{ + if (!dma_buf) { + s_vpr_e(sid, "%s: NULL dma_buf\n", __func__); + return; + } + + dma_buf_put((struct dma_buf *)dma_buf); +} + +int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem) +{ + int rc = 0; + + dma_addr_t iova = 0; + u32 temp = 0; + unsigned long buffer_size = 0; + unsigned long align = SZ_4K; + struct dma_buf *dbuf; + unsigned long ion_flags = 0; + u32 b_type = HAL_BUFFER_INPUT | HAL_BUFFER_OUTPUT | HAL_BUFFER_OUTPUT2; + + if (!inst || !smem) { + d_vpr_e("%s: invalid params: %pK %pK\n", + __func__, inst, smem); + rc = -EINVAL; + goto exit; + } + + if (smem->refcount) { + smem->refcount++; + goto exit; + } + + dbuf = msm_smem_get_dma_buf(smem->fd, inst->sid); + if (!dbuf) { + rc = -EINVAL; + goto exit; + } + + smem->dma_buf = dbuf; + + rc = dma_buf_get_flags(dbuf, &ion_flags); + if (rc) { + s_vpr_e(inst->sid, "Failed to get dma buf flags: %d\n", rc); + goto fail_map_dma_buf; + } + if (ion_flags & ION_FLAG_CACHED) + smem->flags |= SMEM_CACHED; + + if (ion_flags & ION_FLAG_SECURE) + smem->flags |= SMEM_SECURE; + + if ((smem->buffer_type & b_type) && + !!(smem->flags & SMEM_SECURE) ^ !!(inst->flags & VIDC_SECURE)) { + 
s_vpr_e(inst->sid, "Failed to map %s buffer with %s session\n", + smem->flags & SMEM_SECURE ? "secure" : "non-secure", + inst->flags & VIDC_SECURE ? "secure" : "non-secure"); + rc = -EINVAL; + goto fail_map_dma_buf; + } + buffer_size = smem->size; + + rc = msm_dma_get_device_address(dbuf, align, &iova, &buffer_size, + smem->flags, smem->buffer_type, inst->session_type, + &(inst->core->resources), &smem->mapping_info, + inst->sid); + if (rc) { + s_vpr_e(inst->sid, "Failed to get device address: %d\n", rc); + goto fail_map_dma_buf; + } + temp = (u32)iova; + if ((dma_addr_t)temp != iova) { + s_vpr_e(inst->sid, "iova(%pa) truncated to %#x", &iova, temp); + rc = -EINVAL; + goto fail_iova_truncation; + } + + smem->device_addr = (u32)iova + smem->offset; + + smem->refcount++; + return 0; + +fail_iova_truncation: + msm_dma_put_device_address(smem->flags, &smem->mapping_info, + smem->buffer_type, inst->sid); +fail_map_dma_buf: + msm_smem_put_dma_buf(dbuf, inst->sid); +exit: + return rc; +} + +int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem) +{ + int rc = 0; + + if (!inst || !smem) { + d_vpr_e("%s: invalid params: %pK %pK\n", + __func__, inst, smem); + rc = -EINVAL; + goto exit; + } + + if (smem->refcount) { + smem->refcount--; + } else { + s_vpr_e(inst->sid, + "unmap called while refcount is zero already\n"); + return -EINVAL; + } + + if (smem->refcount) + goto exit; + + rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info, + smem->buffer_type, inst->sid); + if (rc) { + s_vpr_e(inst->sid, "Failed to put device address: %d\n", rc); + goto exit; + } + + msm_smem_put_dma_buf(smem->dma_buf, inst->sid); + + smem->device_addr = 0x0; + smem->dma_buf = NULL; + +exit: + return rc; +} + +static int get_secure_flag_for_buffer_type( + u32 session_type, enum hal_buffer buffer_type) +{ + switch (buffer_type) { + case HAL_BUFFER_INPUT: + if (session_type == MSM_VIDC_ENCODER) + return ION_FLAG_CP_PIXEL; + else + return ION_FLAG_CP_BITSTREAM; + case 
HAL_BUFFER_OUTPUT: + case HAL_BUFFER_OUTPUT2: + if (session_type == MSM_VIDC_ENCODER) + return ION_FLAG_CP_BITSTREAM; + else + return ION_FLAG_CP_PIXEL; + case HAL_BUFFER_INTERNAL_SCRATCH: + return ION_FLAG_CP_BITSTREAM; + case HAL_BUFFER_INTERNAL_SCRATCH_1: + return ION_FLAG_CP_NON_PIXEL; + case HAL_BUFFER_INTERNAL_SCRATCH_2: + return ION_FLAG_CP_PIXEL; + case HAL_BUFFER_INTERNAL_PERSIST: + if (session_type == MSM_VIDC_ENCODER) + return ION_FLAG_CP_NON_PIXEL; + else + return ION_FLAG_CP_BITSTREAM; + case HAL_BUFFER_INTERNAL_PERSIST_1: + return ION_FLAG_CP_NON_PIXEL; + default: + WARN(1, "No matching secure flag for buffer type : %x\n", + buffer_type); + return -EINVAL; + } +} + +static int alloc_dma_mem(size_t size, u32 align, u32 flags, + enum hal_buffer buffer_type, int map_kernel, + struct msm_vidc_platform_resources *res, u32 session_type, + struct msm_smem *mem, u32 sid) +{ + dma_addr_t iova = 0; + unsigned long buffer_size = 0; + unsigned long heap_mask = 0; + int rc = 0; + int ion_flags = 0; + struct dma_buf *dbuf = NULL; + + if (!res) { + s_vpr_e(sid, "%s: NULL res\n", __func__); + return -EINVAL; + } + + align = ALIGN(align, SZ_4K); + size = ALIGN(size, SZ_4K); + + if (is_iommu_present(res)) { + if (flags & SMEM_ADSP) { + s_vpr_h(sid, "Allocating from ADSP heap\n"); + heap_mask = ION_HEAP(ION_ADSP_HEAP_ID); + } else { + heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID); + } + } else { + s_vpr_h(sid, + "allocate shared memory from adsp heap size %zx align %d\n", + size, align); + heap_mask = ION_HEAP(ION_ADSP_HEAP_ID); + } + + if (flags & SMEM_CACHED) + ion_flags |= ION_FLAG_CACHED; + + if ((flags & SMEM_SECURE) || + (buffer_type == HAL_BUFFER_INTERNAL_PERSIST && + session_type == MSM_VIDC_ENCODER)) { + int secure_flag = + get_secure_flag_for_buffer_type( + session_type, buffer_type); + if (secure_flag < 0) { + rc = secure_flag; + goto fail_shared_mem_alloc; + } + + ion_flags |= ION_FLAG_SECURE | secure_flag; + heap_mask = ION_HEAP(ION_SECURE_HEAP_ID); + + if 
(res->slave_side_cp) { + heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID); + size = ALIGN(size, SZ_1M); + align = ALIGN(size, SZ_1M); + } + flags |= SMEM_SECURE; + } + + trace_msm_smem_buffer_dma_op_start("ALLOC", (u32)buffer_type, + heap_mask, size, align, flags, map_kernel); + dbuf = ion_alloc(size, heap_mask, ion_flags); + if (IS_ERR_OR_NULL(dbuf)) { + s_vpr_e(sid, "Failed to allocate shared memory = %zx, %#x\n", + size, flags); + rc = -ENOMEM; + goto fail_shared_mem_alloc; + } + trace_msm_smem_buffer_dma_op_end("ALLOC", (u32)buffer_type, + heap_mask, size, align, flags, map_kernel); + + mem->flags = flags; + mem->buffer_type = buffer_type; + mem->offset = 0; + mem->size = size; + mem->dma_buf = dbuf; + mem->kvaddr = NULL; + + rc = msm_dma_get_device_address(dbuf, align, &iova, + &buffer_size, flags, buffer_type, + session_type, res, &mem->mapping_info, sid); + if (rc) { + s_vpr_e(sid, "Failed to get device address: %d\n", + rc); + goto fail_device_address; + } + mem->device_addr = (u32)iova; + if ((dma_addr_t)mem->device_addr != iova) { + s_vpr_e(sid, "iova(%pa) truncated to %#x", + &iova, mem->device_addr); + goto fail_device_address; + } + + if (map_kernel) { + dma_buf_begin_cpu_access(dbuf, DMA_BIDIRECTIONAL); + mem->kvaddr = dma_buf_vmap(dbuf); + if (!mem->kvaddr) { + s_vpr_e(sid, "Failed to map shared mem in kernel\n"); + rc = -EIO; + goto fail_map; + } + } + + s_vpr_h(sid, + "%s: dma_buf = %pK, inode = %lu, ref = %ld, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n", + __func__, mem->dma_buf, (dbuf ? file_inode(dbuf->file)->i_ino : -1), + (dbuf ? 
file_count(dbuf->file) : -1), mem->device_addr, mem->size, + mem->kvaddr, mem->buffer_type, mem->flags); + return rc; + +fail_map: + if (map_kernel) + dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL); +fail_device_address: + dma_buf_put(dbuf); +fail_shared_mem_alloc: + return rc; +} + +static int free_dma_mem(struct msm_smem *mem, u32 sid) +{ + struct dma_buf *dbuf = NULL; + + dbuf = (struct dma_buf *)mem->dma_buf; + s_vpr_h(sid, + "%s: dma_buf = %pK, inode = %lu, ref = %ld, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x\n", + __func__, dbuf, (dbuf ? file_inode(dbuf->file)->i_ino : -1), + (dbuf ? file_count(dbuf->file) : -1), mem->device_addr, + mem->size, mem->kvaddr, mem->buffer_type); + + if (mem->device_addr) { + msm_dma_put_device_address(mem->flags, + &mem->mapping_info, mem->buffer_type, sid); + mem->device_addr = 0x0; + } + + if (mem->kvaddr) { + dma_buf_vunmap(dbuf, mem->kvaddr); + mem->kvaddr = NULL; + dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL); + } + + if (dbuf) { + trace_msm_smem_buffer_dma_op_start("FREE", + (u32)mem->buffer_type, -1, mem->size, -1, + mem->flags, -1); + dma_buf_put(dbuf); + mem->dma_buf = NULL; + trace_msm_smem_buffer_dma_op_end("FREE", (u32)mem->buffer_type, + -1, mem->size, -1, mem->flags, -1); + } + + return 0; +} + +int msm_smem_alloc(size_t size, u32 align, u32 flags, + enum hal_buffer buffer_type, int map_kernel, + void *res, u32 session_type, struct msm_smem *smem, u32 sid) +{ + int rc = 0; + + if (!smem || !size) { + s_vpr_e(sid, "%s: NULL smem or %d size\n", + __func__, (u32)size); + return -EINVAL; + } + + rc = alloc_dma_mem(size, align, flags, buffer_type, map_kernel, + (struct msm_vidc_platform_resources *)res, + session_type, smem, sid); + + return rc; +} + +int msm_smem_free(struct msm_smem *smem, u32 sid) +{ + int rc = 0; + + if (!smem) { + s_vpr_e(sid, "NULL smem passed\n"); + return -EINVAL; + } + rc = free_dma_mem(smem, sid); + + return rc; +}; + +int msm_smem_cache_operations(struct dma_buf *dbuf, + 
enum smem_cache_ops cache_op, unsigned long offset, + unsigned long size, u32 sid) +{ + int rc = 0; + unsigned long flags = 0; + + if (!dbuf) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return -EINVAL; + } + + /* Return if buffer doesn't support caching */ + rc = dma_buf_get_flags(dbuf, &flags); + if (rc) { + s_vpr_e(sid, "%s: dma_buf_get_flags failed, err %d\n", + __func__, rc); + return rc; + } else if (!(flags & ION_FLAG_CACHED)) { + return rc; + } + + switch (cache_op) { + case SMEM_CACHE_CLEAN: + case SMEM_CACHE_CLEAN_INVALIDATE: + rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE, + offset, size); + if (rc) + break; + rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE, + offset, size); + break; + case SMEM_CACHE_INVALIDATE: + rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE, + offset, size); + if (rc) + break; + rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE, + offset, size); + break; + default: + s_vpr_e(sid, "%s: cache (%d) operation not supported\n", + __func__, cache_op); + rc = -EINVAL; + break; + } + + return rc; +} + +struct context_bank_info *msm_smem_get_context_bank(u32 session_type, + bool is_secure, struct msm_vidc_platform_resources *res, + enum hal_buffer buffer_type, u32 sid) +{ + struct context_bank_info *cb = NULL, *match = NULL; + + /* + * HAL_BUFFER_INPUT is directly mapped to bitstream CB in DT + * as the buffer type structure was initially designed + * just for decoder. For Encoder, input should be mapped to + * yuvpixel CB. Persist is mapped to nonpixel CB. + * So swap the buffer types just in this local scope. 
+ */ + if (is_secure && session_type == MSM_VIDC_ENCODER) { + if (buffer_type == HAL_BUFFER_INPUT) + buffer_type = HAL_BUFFER_OUTPUT; + else if (buffer_type == HAL_BUFFER_OUTPUT) + buffer_type = HAL_BUFFER_INPUT; + else if (buffer_type == HAL_BUFFER_INTERNAL_PERSIST) + buffer_type = HAL_BUFFER_INTERNAL_PERSIST_1; + } + + mutex_lock(&res->cb_lock); + list_for_each_entry(cb, &res->context_banks, list) { + if (cb->is_secure == is_secure && + cb->buffer_type & buffer_type) { + match = cb; + break; + } + } + mutex_unlock(&res->cb_lock); + if (!match) + s_vpr_e(sid, + "%s: cb not found for buffer_type %x, is_secure %d\n", + __func__, buffer_type, is_secure); + + return match; +} + +int msm_smem_memory_prefetch(struct msm_vidc_inst *inst) +{ + int i, rc = 0; + struct memory_regions *vidc_regions = NULL; + struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX]; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + vidc_regions = &inst->regions; + if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) { + s_vpr_e(inst->sid, "%s: invalid num_regions %d, max %d\n", + __func__, vidc_regions->num_regions, + MEMORY_REGIONS_MAX); + return -EINVAL; + } + + memset(ion_region, 0, sizeof(ion_region)); + for (i = 0; i < vidc_regions->num_regions; i++) { + ion_region[i].size = vidc_regions->region[i].size; + ion_region[i].vmid = vidc_regions->region[i].vmid; + } + + rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region, + vidc_regions->num_regions); + if (rc) + s_vpr_e(inst->sid, "%s: prefetch failed, ret: %d\n", + __func__, rc); + else + s_vpr_l(inst->sid, "%s: prefetch succeeded\n", __func__); + + return rc; +} + +int msm_smem_memory_drain(struct msm_vidc_inst *inst) +{ + int i, rc = 0; + struct memory_regions *vidc_regions = NULL; + struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX]; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + vidc_regions = &inst->regions; + if 
(vidc_regions->num_regions > MEMORY_REGIONS_MAX) { + s_vpr_e(inst->sid, "%s: invalid num_regions %d, max %d\n", + __func__, vidc_regions->num_regions, + MEMORY_REGIONS_MAX); + return -EINVAL; + } + + memset(ion_region, 0, sizeof(ion_region)); + for (i = 0; i < vidc_regions->num_regions; i++) { + ion_region[i].size = vidc_regions->region[i].size; + ion_region[i].vmid = vidc_regions->region[i].vmid; + } + + rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region, + vidc_regions->num_regions); + if (rc) + s_vpr_e(inst->sid, "%s: drain failed, ret: %d\n", __func__, rc); + else + s_vpr_l(inst->sid, "%s: drain succeeded\n", __func__); + + return rc; +} diff --git a/techpack/video/msm/vidc/msm_v4l2_vidc.c b/techpack/video/msm/vidc/msm_v4l2_vidc.c new file mode 100644 index 000000000000..873b1a2053da --- /dev/null +++ b/techpack/video/msm/vidc/msm_v4l2_vidc.c @@ -0,0 +1,769 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include "msm_vidc.h" +#include "msm_vidc_common.h" +#include "msm_vidc_debug.h" +#include "msm_vidc_internal.h" +#include "msm_vidc_res_parse.h" +#include "msm_vidc_resources.h" +#include "vidc_hfi_api.h" +#include "msm_vidc_clocks.h" + +#define BASE_DEVICE_NUMBER 32 + +struct msm_vidc_drv *vidc_driver; + + +static inline struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh) +{ + if (!filp->private_data) + return NULL; + return container_of(filp->private_data, + struct msm_vidc_inst, event_handler); +} + +static int msm_v4l2_open(struct file *filp) +{ + struct video_device *vdev = video_devdata(filp); + struct msm_video_device *vid_dev = + container_of(vdev, struct msm_video_device, vdev); + struct msm_vidc_core *core = video_drvdata(filp); + struct msm_vidc_inst *vidc_inst; + + trace_msm_v4l2_vidc_open_start("msm v4l2_open start"); + vidc_inst = msm_vidc_open(core->id, vid_dev->type); + if (!vidc_inst) { + d_vpr_e("Failed to create instance, 
core: %d, type = %d\n", + core->id, vid_dev->type); + return -ENOMEM; + } + clear_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags); + filp->private_data = &(vidc_inst->event_handler); + trace_msm_v4l2_vidc_open_end("msm v4l2_open end"); + return 0; +} + +static int msm_v4l2_close(struct file *filp) +{ + int rc = 0; + struct msm_vidc_inst *vidc_inst; + + trace_msm_v4l2_vidc_close_start("msm v4l2_close start"); + vidc_inst = get_vidc_inst(filp, NULL); + + rc = msm_vidc_close(vidc_inst); + filp->private_data = NULL; + trace_msm_v4l2_vidc_close_end("msm v4l2_close end"); + return rc; +} + +static int msm_v4l2_querycap(struct file *filp, void *fh, + struct v4l2_capability *cap) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(filp, fh); + + return msm_vidc_querycap((void *)vidc_inst, cap); +} + +int msm_v4l2_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_enum_fmt((void *)vidc_inst, f); +} + +int msm_v4l2_s_fmt(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_s_fmt((void *)vidc_inst, f); +} + +int msm_v4l2_g_fmt(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_g_fmt((void *)vidc_inst, f); +} + +int msm_v4l2_s_ctrl(struct file *file, void *fh, + struct v4l2_control *a) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_s_ctrl((void *)vidc_inst, a); +} + +int msm_v4l2_g_ctrl(struct file *file, void *fh, + struct v4l2_control *a) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_g_ctrl((void *)vidc_inst, a); +} + +int msm_v4l2_reqbufs(struct file *file, void *fh, + struct v4l2_requestbuffers *b) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_reqbufs((void *)vidc_inst, b); +} + +int msm_v4l2_qbuf(struct 
file *file, void *fh, + struct v4l2_buffer *b) +{ + struct video_device *vdev = video_devdata(file); + return msm_vidc_qbuf(get_vidc_inst(file, fh), vdev->v4l2_dev->mdev, b); +} + +int msm_v4l2_dqbuf(struct file *file, void *fh, + struct v4l2_buffer *b) +{ + return msm_vidc_dqbuf(get_vidc_inst(file, fh), b); +} + +int msm_v4l2_streamon(struct file *file, void *fh, + enum v4l2_buf_type i) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_streamon((void *)vidc_inst, i); +} + +int msm_v4l2_streamoff(struct file *file, void *fh, + enum v4l2_buf_type i) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_streamoff((void *)vidc_inst, i); +} + +static int msm_v4l2_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct msm_vidc_inst *vidc_inst = container_of(fh, + struct msm_vidc_inst, event_handler); + + return msm_vidc_subscribe_event((void *)vidc_inst, sub); +} + +static int msm_v4l2_unsubscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct msm_vidc_inst *vidc_inst = container_of(fh, + struct msm_vidc_inst, event_handler); + + return msm_vidc_unsubscribe_event((void *)vidc_inst, sub); +} + +static int msm_v4l2_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dec) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_comm_cmd((void *)vidc_inst, (union msm_v4l2_cmd *)dec); +} + +static int msm_v4l2_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *enc) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_comm_cmd((void *)vidc_inst, (union msm_v4l2_cmd *)enc); +} + +static int msm_v4l2_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_enum_framesizes((void *)vidc_inst, fsize); +} + +static int msm_v4l2_queryctrl(struct file 
*file, void *fh, + struct v4l2_queryctrl *ctrl) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_query_ctrl((void *)vidc_inst, ctrl); +} + +static int msm_v4l2_querymenu(struct file *file, void *fh, + struct v4l2_querymenu *qmenu) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_query_menu((void *)vidc_inst, qmenu); +} + +const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { + .vidioc_querycap = msm_v4l2_querycap, + .vidioc_enum_fmt_vid_cap = msm_v4l2_enum_fmt, + .vidioc_enum_fmt_vid_out = msm_v4l2_enum_fmt, + .vidioc_s_fmt_vid_cap_mplane = msm_v4l2_s_fmt, + .vidioc_s_fmt_vid_out_mplane = msm_v4l2_s_fmt, + .vidioc_g_fmt_vid_cap_mplane = msm_v4l2_g_fmt, + .vidioc_g_fmt_vid_out_mplane = msm_v4l2_g_fmt, + .vidioc_reqbufs = msm_v4l2_reqbufs, + .vidioc_qbuf = msm_v4l2_qbuf, + .vidioc_dqbuf = msm_v4l2_dqbuf, + .vidioc_streamon = msm_v4l2_streamon, + .vidioc_streamoff = msm_v4l2_streamoff, + .vidioc_s_ctrl = msm_v4l2_s_ctrl, + .vidioc_g_ctrl = msm_v4l2_g_ctrl, + .vidioc_queryctrl = msm_v4l2_queryctrl, + .vidioc_querymenu = msm_v4l2_querymenu, + .vidioc_subscribe_event = msm_v4l2_subscribe_event, + .vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event, + .vidioc_decoder_cmd = msm_v4l2_decoder_cmd, + .vidioc_encoder_cmd = msm_v4l2_encoder_cmd, + .vidioc_enum_framesizes = msm_v4l2_enum_framesizes, +}; + +static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = { 0 }; + +static unsigned int msm_v4l2_poll(struct file *filp, + struct poll_table_struct *pt) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(filp, NULL); + + return msm_vidc_poll((void *)vidc_inst, filp, pt); +} + +static const struct v4l2_file_operations msm_v4l2_vidc_fops = { + .owner = THIS_MODULE, + .open = msm_v4l2_open, + .release = msm_v4l2_close, + .unlocked_ioctl = video_ioctl2, + .poll = msm_v4l2_poll, +}; + +void msm_vidc_release_video_device(struct video_device *pvdev) +{ +} + +static int read_platform_resources(struct 
msm_vidc_core *core, + struct platform_device *pdev) +{ + int rc = 0; + + if (!core || !pdev) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, core, pdev); + return -EINVAL; + } + if (!pdev->dev.of_node) { + d_vpr_e("%s: pdev node is NULL\n", __func__); + return -EINVAL; + } + + core->hfi_type = VIDC_HFI_VENUS; + core->resources.pdev = pdev; + /* Target supports DT, parse from it */ + rc = read_platform_resources_from_drv_data(core); + if (rc) { + d_vpr_e("%s: read platform resources from driver failed\n", + __func__); + return rc; + } + + rc = read_platform_resources_from_dt(&core->resources); + if (rc) { + d_vpr_e("%s: read platform resources from dt failed\n", + __func__); + return rc; + } + return 0; +} + +static int msm_vidc_initialize_core(struct platform_device *pdev, + struct msm_vidc_core *core) +{ + int i = 0; + int rc = 0; + + if (!core) + return -EINVAL; + rc = read_platform_resources(core, pdev); + if (rc) { + d_vpr_e("Failed to get platform resources\n"); + return rc; + } + + INIT_LIST_HEAD(&core->instances); + mutex_init(&core->lock); + mutex_init(&core->resources.cb_lock); + + core->state = VIDC_CORE_UNINIT; + for (i = SYS_MSG_INDEX(SYS_MSG_START); + i <= SYS_MSG_INDEX(SYS_MSG_END); i++) { + init_completion(&core->completions[i]); + } + + INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler); + INIT_WORK(&core->ssr_work, msm_vidc_ssr_handler); + + msm_vidc_init_core_clk_ops(core); + return rc; +} + +static ssize_t link_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct msm_vidc_core *core = dev_get_drvdata(dev); + + if (core) + if (dev == &core->vdev[MSM_VIDC_DECODER].vdev.dev) + return snprintf(buf, PAGE_SIZE, "venus_dec"); + else if (dev == &core->vdev[MSM_VIDC_ENCODER].vdev.dev) + return snprintf(buf, PAGE_SIZE, "venus_enc"); + else + return 0; + else + return 0; +} + +static DEVICE_ATTR_RO(link_name); + +static ssize_t pwr_collapse_delay_store(struct device *dev, + struct 
device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val = 0; + int rc = 0; + struct msm_vidc_core *core = NULL; + + rc = kstrtoul(buf, 0, &val); + if (rc) + return rc; + else if (!val) + return -EINVAL; + + core = get_vidc_core(MSM_VIDC_CORE_VENUS); + if (!core) + return -EINVAL; + core->resources.msm_vidc_pwr_collapse_delay = val; + return count; +} + +static ssize_t pwr_collapse_delay_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct msm_vidc_core *core = NULL; + + core = get_vidc_core(MSM_VIDC_CORE_VENUS); + if (!core) + return -EINVAL; + + return snprintf(buf, PAGE_SIZE, "%u\n", + core->resources.msm_vidc_pwr_collapse_delay); +} + +static DEVICE_ATTR_RW(pwr_collapse_delay); + +static ssize_t thermal_level_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", vidc_driver->thermal_level); +} + +static ssize_t thermal_level_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc = 0, val = 0; + + rc = kstrtoint(buf, 0, &val); + if (rc || val < 0) { + d_vpr_e("Invalid thermal level value: %s\n", buf); + return -EINVAL; + } + d_vpr_h("Thermal level old %d new %d\n", + vidc_driver->thermal_level, val); + + if (val == vidc_driver->thermal_level) + return count; + vidc_driver->thermal_level = val; + + msm_comm_handle_thermal_event(); + return count; +} + +static DEVICE_ATTR_RW(thermal_level); + +static ssize_t sku_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d", + vidc_driver->sku_version); +} + +static DEVICE_ATTR_RO(sku_version); + +static struct attribute *msm_vidc_core_attrs[] = { + &dev_attr_pwr_collapse_delay.attr, + &dev_attr_thermal_level.attr, + &dev_attr_sku_version.attr, + NULL +}; + +static struct attribute_group msm_vidc_core_attr_group = { + .attrs = msm_vidc_core_attrs, +}; + +static const struct of_device_id 
msm_vidc_dt_match[] = { + {.compatible = "qcom,msm-vidc"}, + {.compatible = "qcom,msm-vidc,context-bank"}, + {.compatible = "qcom,msm-vidc,mem-cdsp"}, + {} +}; + +static int msm_vidc_register_video_device(enum session_type sess_type, + int nr, struct msm_vidc_core *core, struct device *dev) +{ + int rc = 0; + + core->vdev[sess_type].vdev.release = + msm_vidc_release_video_device; + core->vdev[sess_type].vdev.fops = &msm_v4l2_vidc_fops; + core->vdev[sess_type].vdev.ioctl_ops = &msm_v4l2_ioctl_ops; + core->vdev[sess_type].vdev.vfl_dir = VFL_DIR_M2M; + core->vdev[sess_type].type = sess_type; + core->vdev[sess_type].vdev.v4l2_dev = &core->v4l2_dev; + core->vdev[sess_type].vdev.device_caps = + V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_STREAMING; + rc = video_register_device(&core->vdev[sess_type].vdev, + VFL_TYPE_GRABBER, nr); + if (rc) { + d_vpr_e("Failed to register the video device\n"); + return rc; + } + video_set_drvdata(&core->vdev[sess_type].vdev, core); + dev = &core->vdev[sess_type].vdev.dev; + rc = device_create_file(dev, &dev_attr_link_name); + if (rc) { + d_vpr_e("Failed to create video device file\n"); + video_unregister_device(&core->vdev[sess_type].vdev); + return rc; + } + return 0; +} +static int msm_vidc_probe_vidc_device(struct platform_device *pdev) +{ + int rc = 0; + struct msm_vidc_core *core; + struct device *dev = NULL; + int nr = BASE_DEVICE_NUMBER; + + if (!vidc_driver) { + d_vpr_e("Invalid vidc driver\n"); + return -EINVAL; + } + + core = kzalloc(sizeof(*core), GFP_KERNEL); + if (!core) + return -ENOMEM; + + core->platform_data = vidc_get_drv_data(&pdev->dev); + dev_set_drvdata(&pdev->dev, core); + rc = msm_vidc_initialize_core(pdev, core); + if (rc) { + d_vpr_e("Failed to init core\n"); + goto err_core_init; + } + rc = sysfs_create_group(&pdev->dev.kobj, &msm_vidc_core_attr_group); + if (rc) { + d_vpr_e("Failed to create attributes\n"); + goto err_core_init; + } + + core->id = MSM_VIDC_CORE_VENUS; + + 
vidc_driver->ctxt = kcalloc(core->platform_data->max_inst_count, + sizeof(*vidc_driver->ctxt), GFP_KERNEL); + if (!vidc_driver->ctxt) + goto err_vidc_context; + vidc_driver->num_ctxt = core->platform_data->max_inst_count; + + rc = v4l2_device_register(&pdev->dev, &core->v4l2_dev); + if (rc) { + d_vpr_e("Failed to register v4l2 device\n"); + goto err_v4l2_register; + } + + /* setup the decoder device */ + rc = msm_vidc_register_video_device(MSM_VIDC_DECODER, + nr, core, dev); + if (rc) { + d_vpr_e("Failed to register video decoder\n"); + goto err_dec; + } + + /* setup the encoder device */ + rc = msm_vidc_register_video_device(MSM_VIDC_ENCODER, + nr + 1, core, dev); + if (rc) { + d_vpr_e("Failed to register video encoder\n"); + goto err_enc; + } + + /* finish setting up the 'core' */ + mutex_lock(&vidc_driver->lock); + if (vidc_driver->num_cores + 1 > MSM_VIDC_CORES_MAX) { + mutex_unlock(&vidc_driver->lock); + d_vpr_e("Maximum cores already exist, core_no = %d\n", + vidc_driver->num_cores); + goto err_cores_exceeded; + } + vidc_driver->num_cores++; + mutex_unlock(&vidc_driver->lock); + + core->device = vidc_hfi_initialize(core->hfi_type, core->id, + &core->resources, &handle_cmd_response); + if (IS_ERR_OR_NULL(core->device)) { + mutex_lock(&vidc_driver->lock); + vidc_driver->num_cores--; + mutex_unlock(&vidc_driver->lock); + + rc = PTR_ERR(core->device) ? 
+ PTR_ERR(core->device) : -EBADHANDLE; + if (rc != -EPROBE_DEFER) + d_vpr_e("Failed to create HFI device\n"); + else + d_vpr_h("msm_vidc: request probe defer\n"); + goto err_cores_exceeded; + } + + core->vidc_core_workq = create_singlethread_workqueue( + "vidc_core_workq"); + if (!core->vidc_core_workq) { + d_vpr_e("%s: create core workq failed\n", __func__); + goto err_core_workq; + } + mutex_lock(&vidc_driver->lock); + list_add_tail(&core->list, &vidc_driver->cores); + mutex_unlock(&vidc_driver->lock); + + core->debugfs_root = msm_vidc_debugfs_init_core( + core, vidc_driver->debugfs_root); + + vidc_driver->sku_version = core->resources.sku_version; + + d_vpr_h("populating sub devices\n"); + /* + * Trigger probe for each sub-device i.e. qcom,msm-vidc,context-bank. + * When msm_vidc_probe is called for each sub-device, parse the + * context-bank details and store it in core->resources.context_banks + * list. + */ + rc = of_platform_populate(pdev->dev.of_node, msm_vidc_dt_match, NULL, + &pdev->dev); + if (rc) { + d_vpr_e("Failed to trigger probe for sub-devices\n"); + goto err_fail_sub_device_probe; + } + + return rc; + +err_fail_sub_device_probe: + if (core->vidc_core_workq) + destroy_workqueue(core->vidc_core_workq); +err_core_workq: + vidc_hfi_deinitialize(core->hfi_type, core->device); +err_cores_exceeded: + device_remove_file(&core->vdev[MSM_VIDC_ENCODER].vdev.dev, + &dev_attr_link_name); + video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev); +err_enc: + device_remove_file(&core->vdev[MSM_VIDC_DECODER].vdev.dev, + &dev_attr_link_name); + video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev); +err_dec: + v4l2_device_unregister(&core->v4l2_dev); +err_v4l2_register: + kfree(vidc_driver->ctxt); +err_vidc_context: + sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group); +err_core_init: + dev_set_drvdata(&pdev->dev, NULL); + kfree(core); + return rc; +} + +static int msm_vidc_probe_mem_cdsp(struct platform_device *pdev) +{ + return 
read_mem_cdsp_resources_from_dt(pdev); +} + +static int msm_vidc_probe_context_bank(struct platform_device *pdev) +{ + return read_context_bank_resources_from_dt(pdev); +} + +static int msm_vidc_probe(struct platform_device *pdev) +{ + /* + * Sub devices probe will be triggered by of_platform_populate() towards + * the end of the probe function after msm-vidc device probe is + * completed. Return immediately after completing sub-device probe. + */ + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-vidc")) { + return msm_vidc_probe_vidc_device(pdev); + } else if (of_device_is_compatible(pdev->dev.of_node, + "qcom,msm-vidc,context-bank")) { + return msm_vidc_probe_context_bank(pdev); + } else if (of_device_is_compatible(pdev->dev.of_node, + "qcom,msm-vidc,mem-cdsp")) { + return msm_vidc_probe_mem_cdsp(pdev); + } + + /* How did we end up here? */ + MSM_VIDC_ERROR(1); + return -EINVAL; +} + +static int msm_vidc_remove(struct platform_device *pdev) +{ + int rc = 0; + struct msm_vidc_core *core; + + if (!pdev) { + d_vpr_e("%s: invalid input %pK", __func__, pdev); + return -EINVAL; + } + + core = dev_get_drvdata(&pdev->dev); + if (!core) { + d_vpr_e("%s: invalid core", __func__); + return -EINVAL; + } + + if (core->vidc_core_workq) + destroy_workqueue(core->vidc_core_workq); + vidc_hfi_deinitialize(core->hfi_type, core->device); + device_remove_file(&core->vdev[MSM_VIDC_ENCODER].vdev.dev, + &dev_attr_link_name); + video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev); + device_remove_file(&core->vdev[MSM_VIDC_DECODER].vdev.dev, + &dev_attr_link_name); + video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev); + v4l2_device_unregister(&core->v4l2_dev); + + msm_vidc_free_platform_resources(&core->resources); + sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group); + dev_set_drvdata(&pdev->dev, NULL); + mutex_destroy(&core->resources.cb_lock); + mutex_destroy(&core->lock); + kfree(core); + kfree(vidc_driver->ctxt); + return rc; +} + +static int 
msm_vidc_pm_suspend(struct device *dev) +{ + int rc = 0; + struct msm_vidc_core *core; + + /* + * Bail out if + * - driver possibly not probed yet + * - not the main device. We don't support power management on + * subdevices (e.g. context banks) + */ + if (!dev || !dev->driver || + !of_device_is_compatible(dev->of_node, "qcom,msm-vidc")) + return 0; + + core = dev_get_drvdata(dev); + if (!core) { + d_vpr_e("%s: invalid core\n", __func__); + return -EINVAL; + } + + rc = msm_vidc_suspend(core->id); + if (rc == -ENOTSUPP) + rc = 0; + else if (rc) + d_vpr_e("Failed to suspend: %d\n", rc); + + + return rc; +} + +static int msm_vidc_pm_resume(struct device *dev) +{ + d_vpr_h("%s\n", __func__); + return 0; +} + +static const struct dev_pm_ops msm_vidc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(msm_vidc_pm_suspend, msm_vidc_pm_resume) +}; + +MODULE_DEVICE_TABLE(of, msm_vidc_dt_match); + +static struct platform_driver msm_vidc_driver = { + .probe = msm_vidc_probe, + .remove = msm_vidc_remove, + .driver = { + .name = "msm_vidc_v4l2", + .of_match_table = msm_vidc_dt_match, + .pm = &msm_vidc_pm_ops, + }, +}; + +static int __init msm_vidc_init(void) +{ + int rc = 0; + + vidc_driver = kzalloc(sizeof(*vidc_driver), + GFP_KERNEL); + if (!vidc_driver) { + d_vpr_e("Failed to allocate memroy for msm_vidc_drv\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&vidc_driver->cores); + mutex_init(&vidc_driver->lock); + vidc_driver->debugfs_root = msm_vidc_debugfs_init_drv(); + if (!vidc_driver->debugfs_root) + d_vpr_e("Failed to create debugfs for msm_vidc\n"); + + rc = platform_driver_register(&msm_vidc_driver); + if (rc) { + d_vpr_e("Failed to register platform driver\n"); + debugfs_remove_recursive(vidc_driver->debugfs_root); + kfree(vidc_driver); + vidc_driver = NULL; + } + + return rc; +} + +static void __exit msm_vidc_exit(void) +{ + platform_driver_unregister(&msm_vidc_driver); + debugfs_remove_recursive(vidc_driver->debugfs_root); + mutex_destroy(&vidc_driver->lock); + kfree(vidc_driver); 
+ vidc_driver = NULL; +} + +module_init(msm_vidc_init); +module_exit(msm_vidc_exit); + +MODULE_SOFTDEP("pre: subsys-pil-tz"); +MODULE_LICENSE("GPL v2"); diff --git a/techpack/video/msm/vidc/msm_vdec.c b/techpack/video/msm/vidc/msm_vdec.c new file mode 100644 index 000000000000..2e9780442231 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vdec.c @@ -0,0 +1,1549 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include "msm_vdec.h" +#include "msm_vidc_internal.h" +#include "msm_vidc_common.h" +#include "vidc_hfi.h" +#include "vidc_hfi_helper.h" +#include "vidc_hfi_api.h" +#include "msm_vidc_debug.h" +#include "msm_vidc_clocks.h" +#include "msm_vidc_buffer_calculations.h" + +#define MIN_NUM_DEC_OUTPUT_BUFFERS 4 +#define MIN_NUM_DEC_CAPTURE_BUFFERS 4 +/* Y=16(0-9bits), Cb(10-19bits)=Cr(20-29bits)=128, black by default */ +#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8020010 +#define MAX_VP9D_INST_COUNT 3 + +static const char *const mpeg_video_h264_profile[] = { + "Baseline", + "Constrained Baseline", + "Main", + "Extended", + "High", + "High 10", + "High 422", + "High 444 Predictive", + "High 10 Intra", + "High 422 Intra", + "High 444 Intra", + "CAVLC 444 Intra", + "Scalable Baseline", + "Scalable High", + "Scalable High Intra", + "Stereo High", + "Multiview High", + "Constrained High", + NULL, +}; + +static const char *const mpeg_video_h264_level[] = { + "1", + "1b", + "1.1", + "1.2", + "1.3", + "2", + "2.1", + "2.2", + "3", + "3.1", + "3.2", + "4", + "4.1", + "4.2", + "5", + "5.1", + "5.2", + "6.0", + "6.1", + "6.2", + NULL, +}; + +static const char *const vp9_level[] = { + "Unused", + "1.0", + "1.1", + "2.0", + "2.1", + "3.0", + "3.1", + "4.0", + "4.1", + "5.0", + "5.1", + NULL +}; + +static const char *const mpeg2_profile[] = { + "Simple", + "Main", + "High", + NULL +}; + +static const char *const mpeg2_level[] = { + "0", + "1", + "2", + "3", + NULL +}; + +static struct msm_vidc_ctrl 
msm_vdec_ctrls[] = { + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_UNKNOWN, + .name = "Invalid control", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 0, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_DECODE_ORDER, + .name = "Decode Order", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE, + .name = "Sync Frame Decode", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE, + .name = "Secure mode", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA, + .name = "Extradata Type", + .type = V4L2_CTRL_TYPE_BITMASK, + .minimum = EXTRADATA_NONE, + .maximum = EXTRADATA_DEFAULT | EXTRADATA_ADVANCED, + .default_value = EXTRADATA_DEFAULT, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE, + .name = "Video decoder multi stream", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = + V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY, + .maximum = + V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY, + .default_value = + V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE, + .name = "H264 Profile", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, + .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH, + .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + (1 << 
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) + ), + .qmenu = mpeg_video_h264_profile, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL, + .name = "H264 Level", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_6_2, + .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_5_0, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1B) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_3) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2) + ), + .qmenu = mpeg_video_h264_level, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE, + .name = "HEVC Profile", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN, + .maximum = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10, + .default_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) | + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL, + .name = "HEVC Level", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + .maximum = 
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2, + .default_value = V4L2_MPEG_VIDEO_HEVC_LEVEL_5, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_2) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_3) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_4) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_TIER, + .name = "HEVC Tier", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_TIER_MAIN, + .maximum = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH, + .default_value = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) | + (1 << V4L2_MPEG_VIDEO_HEVC_TIER_HIGH) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE, + .name = "VP8 Profile", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_VP8_PROFILE_0, + .maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_0, + .default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0, + .menu_skip_mask = ~(1 << V4L2_MPEG_VIDEO_VP8_PROFILE_0), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE, + .name = "VP9 Profile", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_VP9_PROFILE_0, + .maximum = V4L2_MPEG_VIDEO_VP9_PROFILE_2, + .default_value = V4L2_MPEG_VIDEO_VP9_PROFILE_0, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_VP9_PROFILE_0) | + (1 << V4L2_MPEG_VIDEO_VP9_PROFILE_2) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL, + .name = "VP9 Level", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED, + .maximum = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51, + .default_value = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51, + .menu_skip_mask = 
~( + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_1) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_11) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_2) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_21) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_3) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_31) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_4) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_41) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5) | + (1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51) + ), + .qmenu = vp9_level, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE, + .name = "MPEG2 Profile", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE, + .maximum = V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN, + .default_value = V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE) | + (1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN) + ), + .qmenu = mpeg2_profile, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL, + .name = "MPEG2 Level", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0, + .maximum = V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2, + .default_value = V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0) | + (1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_1) | + (1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2) + ), + .qmenu = mpeg2_level, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT, + .name = "Picture concealed color 8bit", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0x0, + .maximum = 0xff3fcff, + .default_value = DEFAULT_VIDEO_CONCEAL_COLOR_BLACK, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT, + .name = "Picture concealed color 10bit", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0x0, + .maximum = 0x3fffffff, + .default_value = DEFAULT_VIDEO_CONCEAL_COLOR_BLACK, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT, + .name = "Buffer size 
limit", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = INT_MAX, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, + .name = "CAPTURE Count", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = SINGLE_OUTPUT_BUFFER, + .maximum = MAX_NUM_OUTPUT_BUFFERS, + .default_value = SINGLE_OUTPUT_BUFFER, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, + .name = "OUTPUT Count", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = SINGLE_INPUT_BUFFER, + .maximum = MAX_NUM_INPUT_BUFFERS, + .default_value = SINGLE_INPUT_BUFFER, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE, + .name = "Frame Rate", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = (MINIMUM_FPS << 16), + .maximum = (MAXIMUM_FPS << 16), + .default_value = (DEFAULT_FPS << 16), + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY, + .name = "Session Priority", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_ENABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE, + .name = "Decoder Operating rate", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = (DEFAULT_FPS << 16),/* Power Vote min fps */ + .maximum = INT_MAX, + .default_value = (DEFAULT_FPS << 16), + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE, + .name = "Low Latency Mode", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_HINT, + .name = "Low Latency Hint", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = 
V4L2_CID_MPEG_VIDC_VIDEO_DISABLE_TIMESTAMP_REORDER, + .name = "Disable TimeStamp Reorder", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_SUPERFRAME, + .name = "Superframe", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 0, + .default_value = 0, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VDEC_HEIF_MODE, + .name = "HEIF Decoder", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, +}; + +#define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls) + + +struct msm_vidc_format_desc vdec_output_formats[] = { + { + .name = "YCbCr Semiplanar 4:2:0", + .description = "Y/CbCr 4:2:0", + .fourcc = V4L2_PIX_FMT_NV12, + }, + { + .name = "YCbCr Semiplanar 4:2:0 10bit", + .description = "Y/CbCr 4:2:0 10bit", + .fourcc = V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS, + }, + { + .name = "UBWC YCbCr Semiplanar 4:2:0", + .description = "UBWC Y/CbCr 4:2:0", + .fourcc = V4L2_PIX_FMT_NV12_UBWC, + }, + { + .name = "UBWC YCbCr Semiplanar 4:2:0 10bit", + .description = "UBWC Y/CbCr 4:2:0 10bit", + .fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC, + }, +}; + +struct msm_vidc_format_desc vdec_input_formats[] = { + { + .name = "Mpeg2", + .description = "Mpeg2 compressed format", + .fourcc = V4L2_PIX_FMT_MPEG2, + }, + { + .name = "H264", + .description = "H264 compressed format", + .fourcc = V4L2_PIX_FMT_H264, + }, + { + .name = "HEVC", + .description = "HEVC compressed format", + .fourcc = V4L2_PIX_FMT_HEVC, + }, + { + .name = "VP9", + .description = "VP9 compressed format", + .fourcc = V4L2_PIX_FMT_VP9, + }, +}; + +struct msm_vidc_format_constraint dec_pix_format_constraints[] = { + { + .fourcc = V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS, + .num_planes = 2, + .y_max_stride = 8192, + .y_buffer_alignment = 256, + 
.uv_max_stride = 8192, + .uv_buffer_alignment = 256, + }, + { + .fourcc = V4L2_PIX_FMT_NV12, + .num_planes = 2, + .y_max_stride = 8192, + .y_buffer_alignment = 512, + .uv_max_stride = 8192, + .uv_buffer_alignment = 256, + }, + { + .fourcc = V4L2_PIX_FMT_NV21, + .num_planes = 2, + .y_max_stride = 8192, + .y_buffer_alignment = 512, + .uv_max_stride = 8192, + .uv_buffer_alignment = 256, + }, +}; + +static bool msm_vidc_check_for_vp9d_overload(struct msm_vidc_core *core) +{ + u32 vp9d_instance_count = 0; + struct msm_vidc_inst *inst = NULL; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (inst->session_type == MSM_VIDC_DECODER && + get_v4l2_codec(inst) == V4L2_PIX_FMT_VP9) + vp9d_instance_count++; + } + mutex_unlock(&core->lock); + + if (vp9d_instance_count > MAX_VP9D_INST_COUNT) + return true; + return false; +} + +int msm_vdec_update_stream_output_mode(struct msm_vidc_inst *inst) +{ + struct v4l2_format *f; + u32 format; + u32 stream_output_mode; + u32 fourcc; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + format = f->fmt.pix_mp.pixelformat; + stream_output_mode = HAL_VIDEO_DECODER_PRIMARY; + if ((format == V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS) || + (format == V4L2_PIX_FMT_NV12)) { + stream_output_mode = HAL_VIDEO_DECODER_SECONDARY; + } + + msm_comm_set_stream_output_mode(inst, + stream_output_mode); + + fourcc = V4L2_PIX_FMT_NV12_UBWC; + if (inst->bit_depth == MSM_VIDC_BIT_DEPTH_10) + fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC; + + inst->clk_data.dpb_fourcc = fourcc; + + return 0; +} + +int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) +{ + struct msm_vidc_format *fmt = NULL; + struct msm_vidc_format_desc *fmt_desc = NULL; + struct v4l2_pix_format_mplane *mplane = NULL; + int rc = 0; + u32 color_format; + + if (!inst || !f) { + d_vpr_e("%s: invalid parameters %pK %pK\n", __func__, inst, f); + return -EINVAL; + } + + /* + * 
First update inst format with new width/height/format + * Recalculate sizes/strides etc + * Perform necessary checks to continue with session + * Copy recalculated info into user format + */ + if (f->type == OUTPUT_MPLANE) { + fmt = &inst->fmts[OUTPUT_PORT]; + fmt_desc = msm_comm_get_pixel_fmt_fourcc(vdec_output_formats, + ARRAY_SIZE(vdec_output_formats), + f->fmt.pix_mp.pixelformat, inst->sid); + if (!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set : %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(fmt->name, fmt_desc->name, sizeof(fmt->name)); + strlcpy(fmt->description, fmt_desc->description, + sizeof(fmt->description)); + + inst->clk_data.opb_fourcc = f->fmt.pix_mp.pixelformat; + + fmt->v4l2_fmt.type = f->type; + mplane = &fmt->v4l2_fmt.fmt.pix_mp; + mplane->width = f->fmt.pix_mp.width; + mplane->height = f->fmt.pix_mp.height; + mplane->pixelformat = f->fmt.pix_mp.pixelformat; + mplane->plane_fmt[0].sizeimage = + msm_vidc_calculate_dec_output_frame_size(inst); + + if (mplane->num_planes > 1) + mplane->plane_fmt[1].sizeimage = + msm_vidc_calculate_dec_output_extra_size(inst); + color_format = msm_comm_convert_color_fmt( + f->fmt.pix_mp.pixelformat, inst->sid); + mplane->plane_fmt[0].bytesperline = + VENUS_Y_STRIDE(color_format, f->fmt.pix_mp.width); + mplane->plane_fmt[0].reserved[0] = + VENUS_Y_SCANLINES(color_format, f->fmt.pix_mp.height); + inst->bit_depth = MSM_VIDC_BIT_DEPTH_8; + if ((f->fmt.pix_mp.pixelformat == + V4L2_PIX_FMT_NV12_TP10_UBWC) || + (f->fmt.pix_mp.pixelformat == + V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS)) { + inst->bit_depth = MSM_VIDC_BIT_DEPTH_10; + } + + rc = msm_vidc_check_session_supported(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: session not supported\n", __func__); + goto err_invalid_fmt; + } + + rc = msm_vdec_update_stream_output_mode(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: failed to update output stream mode\n", + __func__); + goto err_invalid_fmt; + } + + memcpy(f, &fmt->v4l2_fmt, sizeof(struct 
v4l2_format)); + } else if (f->type == INPUT_MPLANE) { + fmt = &inst->fmts[INPUT_PORT]; + fmt_desc = msm_comm_get_pixel_fmt_fourcc(vdec_input_formats, + ARRAY_SIZE(vdec_input_formats), + f->fmt.pix_mp.pixelformat, inst->sid); + if (!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set : %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(fmt->name, fmt_desc->name, sizeof(fmt->name)); + strlcpy(fmt->description, fmt_desc->description, + sizeof(fmt->description)); + + if (f->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_VP9) { + if (msm_vidc_check_for_vp9d_overload(inst->core)) { + s_vpr_e(inst->sid, "VP9 Decode overload\n"); + rc = -ENOMEM; + goto err_invalid_fmt; + } + } + + fmt->v4l2_fmt.type = f->type; + mplane = &fmt->v4l2_fmt.fmt.pix_mp; + mplane->width = f->fmt.pix_mp.width; + mplane->height = f->fmt.pix_mp.height; + mplane->pixelformat = f->fmt.pix_mp.pixelformat; + rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE); + if (rc) { + s_vpr_e(inst->sid, "Failed to open instance\n"); + goto err_invalid_fmt; + } + + mplane->plane_fmt[0].sizeimage = + msm_vidc_calculate_dec_input_frame_size(inst, inst->buffer_size_limit); + + /* Driver can recalculate buffer count only for + * only for bitstream port. Decoder YUV port reconfig + * should not overwrite the FW calculated buffer + * count. 
+ */ + rc = msm_vidc_calculate_buffer_counts(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s failed to calculate buffer count\n", + __func__); + return rc; + } + + rc = msm_vidc_check_session_supported(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: session not supported\n", __func__); + goto err_invalid_fmt; + } + update_log_ctxt(inst->sid, inst->session_type, + mplane->pixelformat); + memcpy(f, &fmt->v4l2_fmt, sizeof(struct v4l2_format)); + } + + inst->batch.enable = is_batching_allowed(inst); + msm_dcvs_try_enable(inst); + +err_invalid_fmt: + return rc; +} + +int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) +{ + struct v4l2_format *fmt; + + if (f->type == OUTPUT_MPLANE) { + fmt = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + fmt->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_dec_output_frame_size(inst); + if (fmt->fmt.pix_mp.num_planes > 1) + fmt->fmt.pix_mp.plane_fmt[1].sizeimage = + msm_vidc_calculate_dec_output_extra_size(inst); + memcpy(f, fmt, sizeof(struct v4l2_format)); + } else if (f->type == INPUT_MPLANE) { + fmt = &inst->fmts[INPUT_PORT].v4l2_fmt; + fmt->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_dec_input_frame_size(inst, inst->buffer_size_limit); + memcpy(f, fmt, sizeof(struct v4l2_format)); + } else { + s_vpr_e(inst->sid, "%s: Unsupported buf type: %d\n", + __func__, f->type); + return -EINVAL; + } + + return 0; +} + +int msm_vdec_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f) +{ + const struct msm_vidc_format_desc *fmt_desc = NULL; + int rc = 0; + + if (!inst || !f) { + d_vpr_e("Invalid input, inst = %pK, f = %pK\n", inst, f); + return -EINVAL; + } + if (f->type == OUTPUT_MPLANE) { + fmt_desc = msm_comm_get_pixel_fmt_index(vdec_output_formats, + ARRAY_SIZE(vdec_output_formats), f->index, inst->sid); + } else if (f->type == INPUT_MPLANE) { + fmt_desc = msm_comm_get_pixel_fmt_index(vdec_input_formats, + ARRAY_SIZE(vdec_input_formats), f->index, inst->sid); + f->flags = V4L2_FMT_FLAG_COMPRESSED; + } + + 
memset(f->reserved, 0, sizeof(f->reserved)); + if (fmt_desc) { + strlcpy(f->description, fmt_desc->description, + sizeof(f->description)); + f->pixelformat = fmt_desc->fourcc; + } else { + s_vpr_h(inst->sid, "No more formats found\n"); + rc = -EINVAL; + } + return rc; +} + +int msm_vdec_inst_init(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct msm_vidc_core *core; + struct msm_vidc_format_desc *fmt_desc = NULL; + struct v4l2_format *f = NULL; + + if (!inst || !inst->core) { + d_vpr_e("Invalid input = %pK\n", inst); + return -EINVAL; + } + core = inst->core; + + inst->prop.extradata_ctrls = EXTRADATA_DEFAULT; + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + f->type = OUTPUT_MPLANE; + f->fmt.pix_mp.height = DEFAULT_HEIGHT; + f->fmt.pix_mp.width = DEFAULT_WIDTH; + f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12_UBWC; + f->fmt.pix_mp.num_planes = 2; + f->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_dec_output_frame_size(inst); + f->fmt.pix_mp.plane_fmt[1].sizeimage = + msm_vidc_calculate_dec_output_extra_size(inst); + fmt_desc = msm_comm_get_pixel_fmt_fourcc(vdec_output_formats, + ARRAY_SIZE(vdec_output_formats), + f->fmt.pix_mp.pixelformat, inst->sid); + if (!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set: %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(inst->fmts[OUTPUT_PORT].name, fmt_desc->name, + sizeof(inst->fmts[OUTPUT_PORT].name)); + strlcpy(inst->fmts[OUTPUT_PORT].description, fmt_desc->description, + sizeof(inst->fmts[OUTPUT_PORT].description)); + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + f->type = INPUT_MPLANE; + f->fmt.pix_mp.height = DEFAULT_HEIGHT; + f->fmt.pix_mp.width = DEFAULT_WIDTH; + f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264; + f->fmt.pix_mp.num_planes = 1; + f->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_dec_input_frame_size(inst, inst->buffer_size_limit); + fmt_desc = msm_comm_get_pixel_fmt_fourcc(vdec_input_formats, + ARRAY_SIZE(vdec_input_formats), f->fmt.pix_mp.pixelformat, + inst->sid); + if 
(!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set: %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(inst->fmts[INPUT_PORT].name, fmt_desc->name, + sizeof(inst->fmts[INPUT_PORT].name)); + strlcpy(inst->fmts[INPUT_PORT].description, fmt_desc->description, + sizeof(inst->fmts[INPUT_PORT].description)); + inst->buffer_mode_set[INPUT_PORT] = HAL_BUFFER_MODE_STATIC; + inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_DYNAMIC; + inst->stream_output_mode = HAL_VIDEO_DECODER_PRIMARY; + + + inst->clk_data.frame_rate = (DEFAULT_FPS << 16); + inst->clk_data.operating_rate = (DEFAULT_FPS << 16); + if (core->resources.decode_batching) { + inst->batch.enable = true; + inst->batch.size = MAX_DEC_BATCH_SIZE; + } + + inst->buff_req.buffer[1].buffer_type = HAL_BUFFER_INPUT; + inst->buff_req.buffer[1].buffer_count_min_host = + inst->buff_req.buffer[1].buffer_count_actual = + MIN_NUM_DEC_OUTPUT_BUFFERS; + inst->buff_req.buffer[2].buffer_type = HAL_BUFFER_OUTPUT; + inst->buff_req.buffer[2].buffer_count_min_host = + inst->buff_req.buffer[2].buffer_count_actual = + MIN_NUM_DEC_CAPTURE_BUFFERS; + inst->buff_req.buffer[3].buffer_type = HAL_BUFFER_OUTPUT2; + inst->buff_req.buffer[3].buffer_count_min_host = + inst->buff_req.buffer[3].buffer_count_actual = + MIN_NUM_DEC_CAPTURE_BUFFERS; + inst->buff_req.buffer[4].buffer_type = HAL_BUFFER_EXTRADATA_INPUT; + inst->buff_req.buffer[5].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT; + inst->buff_req.buffer[6].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT2; + inst->buff_req.buffer[7].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH; + inst->buff_req.buffer[8].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_1; + inst->buff_req.buffer[9].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_2; + inst->buff_req.buffer[10].buffer_type = HAL_BUFFER_INTERNAL_PERSIST; + inst->buff_req.buffer[11].buffer_type = HAL_BUFFER_INTERNAL_PERSIST_1; + inst->buff_req.buffer[12].buffer_type = HAL_BUFFER_INTERNAL_CMD_QUEUE; + inst->buff_req.buffer[13].buffer_type = 
HAL_BUFFER_INTERNAL_RECON; + msm_vidc_init_buffer_size_calculators(inst); + + return rc; +} + +int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) +{ + int rc = 0; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + s_vpr_h(inst->sid, "%s: control name = %s, id = 0x%x value = %d\n", + __func__, ctrl->name, ctrl->id, ctrl->val); + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE: + inst->profile = msm_comm_v4l2_to_hfi(ctrl->id, ctrl->val, + inst->sid); + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + case V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL: + case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL: + inst->level = msm_comm_v4l2_to_hfi(ctrl->id, ctrl->val, + inst->sid); + break; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + inst->level |= + (msm_comm_v4l2_to_hfi(ctrl->id, ctrl->val, + inst->sid) << 28); + break; + case V4L2_CID_MPEG_VIDC_VIDEO_DECODE_ORDER: + case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT: + case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT: + break; + case V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE: + inst->clk_data.frame_rate = ctrl->val; + break; + case V4L2_CID_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE: + inst->flags &= ~VIDC_THUMBNAIL; + if (ctrl->val) + inst->flags |= VIDC_THUMBNAIL; + + inst->batch.enable = is_batching_allowed(inst); + rc = msm_vidc_calculate_buffer_counts(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: failed to calculate thumbnail buffer count\n", + __func__); + return rc; + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_SECURE: + inst->flags &= ~VIDC_SECURE; + if (ctrl->val) + inst->flags |= VIDC_SECURE; + if (msm_comm_check_for_inst_overload(inst->core)) { + s_vpr_e(inst->sid, + "%s: Instance count reached Max limit, rejecting session", + __func__); + return -ENOTSUPP; + } + 
msm_comm_memory_prefetch(inst); + break; + case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA: + if (ctrl->val == EXTRADATA_NONE) + inst->prop.extradata_ctrls = 0; + else + inst->prop.extradata_ctrls |= ctrl->val; + /* + * nothing to do here as inst->bufq[OUTPUT_PORT].num_planes + * and inst->bufq[OUTPUT_PORT].plane_sizes[1] are already + * initialized to proper values + */ + break; + case V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT: + inst->buffer_size_limit = ctrl->val; + break; + case V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY: + break; + case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE: + inst->flags &= ~VIDC_TURBO; + if (ctrl->val == INT_MAX) + inst->flags |= VIDC_TURBO; + else + inst->clk_data.operating_rate = ctrl->val; + break; + case V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE: + inst->clk_data.low_latency_mode = !!ctrl->val; + inst->batch.enable = is_batching_allowed(inst); + break; + case V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_HINT: + break; + case V4L2_CID_MPEG_VIDC_VIDEO_DISABLE_TIMESTAMP_REORDER: + break; + case V4L2_CID_MPEG_VIDC_VDEC_HEIF_MODE: + if(get_v4l2_codec(inst) != V4L2_PIX_FMT_HEVC) + break; + inst->flags &= ~VIDC_TURBO; + if (ctrl->val) + inst->flags |= VIDC_TURBO; + if (inst->state < MSM_VIDC_LOAD_RESOURCES) + msm_vidc_calculate_buffer_counts(inst); + break; + default: + s_vpr_e(inst->sid, "Unknown control %#x\n", ctrl->id); + break; + } + + return rc; +} + +int msm_vdec_set_frame_size(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_frame_size frame_size; + struct v4l2_format *f; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + frame_size.buffer_type = HFI_BUFFER_INPUT; + frame_size.width = f->fmt.pix_mp.width; + frame_size.height = f->fmt.pix_mp.height; + s_vpr_h(inst->sid, "%s: input wxh %dx%d\n", __func__, + frame_size.width, frame_size.height); + rc = call_hfi_op(hdev, session_set_property, 
inst->session, + HFI_PROPERTY_PARAM_FRAME_SIZE, &frame_size, sizeof(frame_size)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_vdec_set_color_format(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct msm_vidc_format_constraint *fmt_constraint; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + rc = msm_comm_set_color_format(inst, + msm_comm_get_hal_output_buffer(inst), + inst->clk_data.opb_fourcc); + if (rc) { + s_vpr_e(inst->sid, "%s: set color format (%#x) failed\n", + __func__, inst->clk_data.opb_fourcc); + return rc; + } + fmt_constraint = msm_comm_get_pixel_fmt_constraints( + dec_pix_format_constraints, + ARRAY_SIZE(dec_pix_format_constraints), + inst->clk_data.opb_fourcc, inst->sid); + if (fmt_constraint) { + rc = msm_comm_set_color_format_constraints(inst, + msm_comm_get_hal_output_buffer(inst), + fmt_constraint); + if (rc) { + s_vpr_e(inst->sid, + "%s: Set constraints for color format %#x failed\n", + __func__, inst->clk_data.opb_fourcc); + return rc; + } + } + + return rc; +} + +int msm_vdec_set_input_buffer_counts(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct msm_vidc_format *fmt; + enum hal_buffer buffer_type; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + buffer_type = HAL_BUFFER_INPUT; + fmt = &inst->fmts[INPUT_PORT]; + rc = msm_comm_set_buffer_count(inst, + fmt->count_min, + fmt->count_actual, + buffer_type); + if (rc) { + s_vpr_e(inst->sid, "%s: failed to set bufreqs(%#x)\n", + __func__, buffer_type); + return -EINVAL; + } + + return rc; +} + +int msm_vdec_set_output_buffer_counts(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct msm_vidc_format *fmt; + enum hal_buffer buffer_type; + + if (!inst || !inst->core) { + 
d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + buffer_type = msm_comm_get_hal_output_buffer(inst); + /* Correct buffer counts is always stored in HAL_BUFFER_OUTPUT */ + fmt = &inst->fmts[OUTPUT_PORT]; + if (buffer_type == HAL_BUFFER_OUTPUT2) { + /* + * For split mode set DPB count as well + * For DPB actual count is same as min output count + */ + rc = msm_comm_set_buffer_count(inst, + fmt->count_min, + fmt->count_min, + HAL_BUFFER_OUTPUT); + if (rc) { + s_vpr_e(inst->sid, + "%s: failed to set buffer count(%#x)\n", + __func__, buffer_type); + return -EINVAL; + } + } + rc = msm_comm_set_buffer_count(inst, + fmt->count_min, + fmt->count_actual, + buffer_type); + if (rc) { + s_vpr_e(inst->sid, "%s: failed to set bufreqs(%#x)\n", + __func__, buffer_type); + return -EINVAL; + } + + return rc; +} + +int msm_vdec_set_profile_level(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_profile_level profile_level; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + profile_level.profile = inst->profile; + profile_level.level = inst->level; + + s_vpr_h(inst->sid, "%s: %#x %#x\n", __func__, + profile_level.profile, profile_level.level); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT, &profile_level, + sizeof(profile_level)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_vdec_set_output_order(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + u32 output_order; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_DECODE_ORDER); + s_vpr_h(inst->sid, "%s: %d\n", __func__, ctrl->val); + if (ctrl->val == 
V4L2_MPEG_MSM_VIDC_ENABLE) + output_order = HFI_OUTPUT_ORDER_DECODE; + else + output_order = HFI_OUTPUT_ORDER_DISPLAY; + + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER, &output_order, + sizeof(u32)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_vdec_set_sync_frame_mode(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_enable hfi_property; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE); + hfi_property.enable = (bool)ctrl->val; + + s_vpr_h(inst->sid, "%s: %#x\n", __func__, hfi_property.enable); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE, &hfi_property, + sizeof(hfi_property)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_vdec_set_secure_mode(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + u32 codec; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_SECURE); + + codec = get_v4l2_codec(inst); + if (ctrl->val) { + if (!(codec == V4L2_PIX_FMT_HEVC || + codec == V4L2_PIX_FMT_H264 || + codec == V4L2_PIX_FMT_VP9)) { + s_vpr_e(inst->sid, + "%s: Secure allowed for HEVC/H264/VP9\n", + __func__); + return -EINVAL; + } + } + + s_vpr_h(inst->sid, "%s: %#x\n", __func__, ctrl->val); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_SECURE_SESSION, &ctrl->val, sizeof(u32)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_vdec_set_output_stream_mode(struct msm_vidc_inst 
*inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_multi_stream multi_stream; + struct hfi_frame_size frame_sz; + struct v4l2_format *f; + u32 sid; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + sid = inst->sid; + + if (is_primary_output_mode(inst)) { + multi_stream.buffer_type = HFI_BUFFER_OUTPUT; + multi_stream.enable = true; + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM, &multi_stream, + sizeof(multi_stream)); + if (rc) { + s_vpr_e(sid, + "%s: set prop multistream primary failed: %d\n", + __func__, rc); + return rc; + } + multi_stream.buffer_type = HFI_BUFFER_OUTPUT2; + multi_stream.enable = false; + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM, &multi_stream, + sizeof(multi_stream)); + if (rc) { + s_vpr_e(sid, + "%s: set prop multistream primary2 failed : %d\n", + __func__, rc); + return rc; + } + } else { + rc = msm_comm_set_color_format(inst, + HAL_BUFFER_OUTPUT, inst->clk_data.dpb_fourcc); + if (rc) + return rc; + + multi_stream.buffer_type = HFI_BUFFER_OUTPUT2; + multi_stream.enable = true; + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM, &multi_stream, + sizeof(multi_stream)); + if (rc) { + s_vpr_e(sid, + "%s: set prop multistream secondary failed : %d\n", + __func__, rc); + return rc; + } + multi_stream.buffer_type = HFI_BUFFER_OUTPUT; + multi_stream.enable = false; + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM, &multi_stream, + sizeof(multi_stream)); + if (rc) { + s_vpr_e(sid, + "%s: set prop multistream secondary2 failed: %d\n", + __func__, rc); + return rc; + } + frame_sz.buffer_type = HFI_BUFFER_OUTPUT2; + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + frame_sz.width = f->fmt.pix_mp.width; + frame_sz.height = f->fmt.pix_mp.height; + 
s_vpr_h(sid, + "frame_size: hal buffer type %d, width %d, height %d\n", + frame_sz.buffer_type, frame_sz.width, frame_sz.height); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_FRAME_SIZE, &frame_sz, + sizeof(frame_sz)); + if (rc) { + s_vpr_e(sid, "%s: set prop frame_size failed\n", + __func__); + return rc; + } + } + + return rc; +} + +int msm_vdec_set_priority(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_enable hfi_property; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + hfi_property.enable = is_realtime_session(inst); + + s_vpr_h(inst->sid, "%s: %#x\n", __func__, hfi_property.enable); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_REALTIME, &hfi_property, + sizeof(hfi_property)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_vdec_set_seqchng_at_syncframe(struct msm_vidc_inst *inst) +{ + int rc = 0; + u32 codec; + struct hfi_device *hdev; + struct hfi_enable hfi_property; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + hfi_property.enable = is_low_latency_hint(inst); + + if (!hfi_property.enable) + return 0; + + codec = get_v4l2_codec(inst); + if (!(codec == V4L2_PIX_FMT_HEVC || codec == V4L2_PIX_FMT_H264)) { + s_vpr_e(inst->sid, + "%s: low latency hint supported for HEVC/H264\n", + __func__); + return -EINVAL; + } + s_vpr_h(inst->sid, "%s: %#x\n", __func__, hfi_property.enable); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_SEQCHNG_AT_SYNCFRM, &hfi_property, + sizeof(hfi_property)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_vdec_set_conceal_color(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; 
+ struct v4l2_ctrl *ctrl_8b; + struct v4l2_ctrl *ctrl_10b; + struct hfi_conceal_color conceal_color; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl_8b = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT); + ctrl_10b = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT); + conceal_color.conceal_color_8bit = ctrl_8b->val; + conceal_color.conceal_color_10bit = ctrl_10b->val; + + s_vpr_h(inst->sid, "%s: %#x %#x\n", __func__, + conceal_color.conceal_color_8bit, + conceal_color.conceal_color_10bit); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR, &conceal_color, + sizeof(conceal_color)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + + +int msm_vdec_set_extradata(struct msm_vidc_inst *inst) +{ + uint32_t display_info = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA; + u32 value = 0x0; + u32 codec; + + codec = get_v4l2_codec(inst); + switch (codec) { + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_HEVC: + display_info = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA; + break; + case V4L2_PIX_FMT_VP9: + display_info = + HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA; + break; + case V4L2_PIX_FMT_MPEG2: + display_info = HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA; + break; + } + + /* Enable Default Extradata */ + msm_comm_set_index_extradata(inst, + MSM_VIDC_EXTRADATA_OUTPUT_CROP, 0x1); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA, 0x1); + msm_comm_set_extradata(inst, display_info, 0x1); + + if (codec == V4L2_PIX_FMT_VP9 || codec == V4L2_PIX_FMT_HEVC) { + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_HDR10_HIST_EXTRADATA, 0x1); + } + + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB, 0x1); + if (codec == V4L2_PIX_FMT_HEVC) { + msm_comm_set_extradata(inst, + 
HFI_PROPERTY_PARAM_VDEC_MASTER_DISP_COL_SEI_EXTRADATA, + 0x1); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_CLL_SEI_EXTRADATA, 0x1); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA, + 0x1); + } + + /* Enable / Disable Advanced Extradata */ + if (inst->prop.extradata_ctrls & EXTRADATA_ADVANCED) + value = 0x1; + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA, value); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA, value); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA, value); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA, value); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA, value); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA, value); + msm_comm_set_index_extradata(inst, + MSM_VIDC_EXTRADATA_ASPECT_RATIO, value); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA, value); + + return 0; +} + +int msm_vdec_set_properties(struct msm_vidc_inst *inst) +{ + int rc = 0; + + if (!in_port_reconfig(inst)) { + /* do not allow these settings in port reconfiration */ + rc = msm_vdec_set_frame_size(inst); + if (rc) + goto exit; + rc = msm_vdec_set_input_buffer_counts(inst); + if (rc) + goto exit; + rc = msm_vdec_set_profile_level(inst); + if (rc) + goto exit; + rc = msm_vdec_set_output_order(inst); + if (rc) + goto exit; + rc = msm_vdec_set_sync_frame_mode(inst); + if (rc) + goto exit; + rc = msm_vdec_set_secure_mode(inst); + if (rc) + goto exit; + rc = msm_vdec_set_extradata(inst); + if (rc) + goto exit; + rc = msm_vdec_set_priority(inst); + if (rc) + goto exit; + rc = msm_vdec_set_conceal_color(inst); + if (rc) + goto exit; + rc = msm_vdec_set_seqchng_at_syncframe(inst); + if (rc) + goto exit; + } + + rc = msm_vdec_set_color_format(inst); + if (rc) + goto exit; + rc = 
msm_vdec_set_output_stream_mode(inst); + if (rc) + goto exit; + rc = msm_vdec_set_output_buffer_counts(inst); + if (rc) + goto exit; + +exit: + if (rc) + s_vpr_e(inst->sid, "%s: failed with %d\n", __func__, rc); + else + s_vpr_h(inst->sid, "%s: set properties successful\n", __func__); + + return rc; +} + +int msm_vdec_ctrl_init(struct msm_vidc_inst *inst, + const struct v4l2_ctrl_ops *ctrl_ops) +{ + return msm_comm_ctrl_init(inst, msm_vdec_ctrls, + ARRAY_SIZE(msm_vdec_ctrls), ctrl_ops); +} diff --git a/techpack/video/msm/vidc/msm_vdec.h b/techpack/video/msm/vidc/msm_vdec.h new file mode 100644 index 000000000000..d2e8b28033ed --- /dev/null +++ b/techpack/video/msm/vidc/msm_vdec.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ +#ifndef _MSM_VDEC_H_ +#define _MSM_VDEC_H_ + +#include "msm_vidc.h" +#include "msm_vidc_internal.h" +#define MSM_VDEC_DVC_NAME "msm_vidc_vdec" + +int msm_vdec_inst_init(struct msm_vidc_inst *inst); +int msm_vdec_ctrl_init(struct msm_vidc_inst *inst, + const struct v4l2_ctrl_ops *ctrl_ops); +int msm_vdec_enum_fmt(struct msm_vidc_inst *inst, + struct v4l2_fmtdesc *f); +int msm_vdec_s_fmt(struct msm_vidc_inst *inst, + struct v4l2_format *f); +int msm_vdec_g_fmt(struct msm_vidc_inst *inst, + struct v4l2_format *f); +int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, + struct v4l2_ctrl *ctrl); +int msm_vdec_g_ctrl(struct msm_vidc_inst *inst, + struct v4l2_ctrl *ctrl); +int msm_vdec_set_properties(struct msm_vidc_inst *inst); +#endif diff --git a/techpack/video/msm/vidc/msm_venc.c b/techpack/video/msm/vidc/msm_venc.c new file mode 100644 index 000000000000..719bb76a3a49 --- /dev/null +++ b/techpack/video/msm/vidc/msm_venc.c @@ -0,0 +1,5098 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ +#include "msm_venc.h" +#include "msm_vidc_internal.h" +#include "msm_vidc_common.h" +#include "vidc_hfi.h" +#include "vidc_hfi_helper.h" +#include "vidc_hfi_api.h" +#include "msm_vidc_debug.h" +#include "msm_vidc_clocks.h" +#include "msm_vidc_buffer_calculations.h" + +#define MIN_BIT_RATE 32000 +#define MAX_BIT_RATE 1200000000 +#define DEFAULT_BIT_RATE 64000 +#define MIN_BIT_RATE_RATIO 0 +#define MAX_BIT_RATE_RATIO 100 +#define MAX_HIER_CODING_LAYER 6 +#define BIT_RATE_STEP 1 +#define MAX_BASE_LAYER_PRIORITY_ID 63 +#define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3) +#define MIN_SLICE_BYTE_SIZE 512 +#define MAX_SLICE_MB_SIZE (((4096 + 15) >> 4) * ((2304 + 15) >> 4)) +#define QP_ENABLE_I 0x1 +#define QP_ENABLE_P 0x2 +#define QP_ENABLE_B 0x4 +#define MIN_QP 0 +#define MAX_QP 0x7F +#define MAX_QP_PACKED 0x7F7F7F +#define DEFAULT_QP 0xA +#define DEFAULT_QP_PACKED 0xA0A0A +#define MIN_CHROMA_QP_OFFSET -12 +#define MAX_INTRA_REFRESH_MBS ((7680 * 4320) >> 8) +#define MAX_LTR_FRAME_COUNT 10 +#define MAX_NUM_B_FRAMES 1 +#define MIN_CBRPLUS_W 640 +#define MIN_CBRPLUS_H 480 +#define MAX_CBR_W 1280 +#define MAX_CBR_H 720 +#define LEGACY_CBR_BUF_SIZE 500 +#define CBR_PLUS_BUF_SIZE 1000 +#define MAX_GOP 0xFFFFFFF +#define MAX_QPRANGE_BOOST 0x3333 + +#define MIN_NUM_ENC_OUTPUT_BUFFERS 4 +#define MIN_NUM_ENC_CAPTURE_BUFFERS 5 +#define VENC_MAX_TIMESTAMP_LIST_SIZE 2 + +static const char *const mpeg_video_rate_control[] = { + "VBR", + "CBR", + "CBR VFR", + "MBR", + "MBR VFR", + "CQ", + NULL +}; + +static const char *const mpeg_video_h264_profile[] = { + "Baseline", + "Constrained Baseline", + "Main", + "Extended", + "High", + "High 10", + "High 422", + "High 444 Predictive", + "High 10 Intra", + "High 422 Intra", + "High 444 Intra", + "CAVLC 444 Intra", + "Scalable Baseline", + "Scalable High", + "Scalable High Intra", + "Stereo High", + "Multiview High", + "Constrained High", + NULL, +}; + +static const char *const mpeg_video_h264_level[] = { + "1", + "1b", + "1.1", + "1.2", + 
"1.3", + "2", + "2.1", + "2.2", + "3", + "3.1", + "3.2", + "4", + "4.1", + "4.2", + "5", + "5.1", + "5.2", + "6.0", + "6.1", + "6.2", + NULL, +}; + +static const char *const mpeg_video_stream_format[] = { + "NAL Format Start Codes", + "NAL Format One NAL Per Buffer", + "NAL Format One Byte Length", + "NAL Format Two Byte Length", + "NAL Format Four Byte Length", + NULL +}; + +static const char *const roi_map_type[] = { + "None", + "2-bit", + "2-bit", +}; + +static struct msm_vidc_ctrl msm_venc_ctrls[] = { + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_UNKNOWN, + .name = "Invalid control", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 0, + .default_value = 0, + .step = 1, + .menu_skip_mask = 0, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE, + .name = "Intra Period for P frames", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = MAX_GOP, + .default_value = 2*DEFAULT_FPS-1, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, + .name = "HEVC I Frame Quantization", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_QP, + .maximum = MAX_QP, + .default_value = DEFAULT_QP, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, + .name = "HEVC P Frame Quantization", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_QP, + .maximum = MAX_QP, + .default_value = DEFAULT_QP, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP, + .name = "HEVC B Frame Quantization", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_QP, + .maximum = MAX_QP, + .default_value = DEFAULT_QP, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + .name = "HEVC Quantization Range Minimum", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_QP, + .maximum = MAX_QP_PACKED, + .default_value = DEFAULT_QP_PACKED, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP, + .name = "HEVC Quantization Range Maximum", + .type = 
V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_QP, + .maximum = MAX_QP_PACKED, + .default_value = DEFAULT_QP_PACKED, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_B_FRAMES, + .name = "Intra Period for B frames", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = MAX_NUM_B_FRAMES, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, + .name = "CAPTURE Count", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = SINGLE_OUTPUT_BUFFER, + .maximum = MAX_NUM_OUTPUT_BUFFERS, + .default_value = SINGLE_OUTPUT_BUFFER, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, + .name = "OUTPUT Count", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = SINGLE_INPUT_BUFFER, + .maximum = MAX_NUM_INPUT_BUFFERS, + .default_value = SINGLE_INPUT_BUFFER, + .step = 1, + .qmenu = NULL, + }, + + { + .id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, + .name = "Request I Frame", + .type = V4L2_CTRL_TYPE_BUTTON, + .minimum = 0, + .maximum = 0, + .default_value = 0, + .step = 0, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE, + .name = "Video Bitrate Control", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, + .maximum = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ, + .default_value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_MBR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + ), + .qmenu = mpeg_video_rate_control, + }, + { + .id = V4L2_CID_MPEG_VIDC_COMPRESSION_QUALITY, + .name = "Compression quality", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_FRAME_QUALITY, + .maximum = MAX_FRAME_QUALITY, + .default_value = DEFAULT_FRAME_QUALITY, + .step = FRAME_QUALITY_STEP, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_IMG_GRID_SIZE, + .name 
= "Image grid size", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = HEIC_GRID_DIMENSION, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE, + .name = "Frame Rate", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = (MINIMUM_FPS << 16), + .maximum = (MAXIMUM_FPS << 16), + .default_value = (DEFAULT_FPS << 16), + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_BITRATE, + .name = "Bit Rate", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_BIT_RATE, + .maximum = MAX_BIT_RATE, + .default_value = DEFAULT_BIT_RATE, + .step = BIT_RATE_STEP, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE, + .name = "Entropy Mode", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC, + .maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, + .default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) | + (1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC) + ), + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE, + .name = "H264 Profile", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, + .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH, + .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) + ), + .qmenu = mpeg_video_h264_profile, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL, + .name = "H264 Level", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_6_2, + .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_6_2, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1B) | + (1 
<< V4L2_MPEG_VIDEO_H264_LEVEL_1_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_3) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2) + ), + .qmenu = mpeg_video_h264_level, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE, + .name = "HEVC Profile", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN, + .maximum = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10, + .default_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) | + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL, + .name = "HEVC Level", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + .maximum = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2, + .default_value = + V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_2) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_3) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_4) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6) | + (1 << 
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) | + (1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_TIER, + .name = "HEVC Tier", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_TIER_MAIN, + .maximum = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH, + .default_value = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) | + (1 << V4L2_MPEG_VIDEO_HEVC_TIER_HIGH) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_ROTATE, + .name = "Rotation", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 270, + .default_value = 0, + .step = 90, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, + .name = "Slice Mode", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE, + .maximum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES, + .default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE) | + (1 << V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) | + (1 << V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) + ), + }, + { + .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, + .name = "Slice Byte Size", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_SLICE_BYTE_SIZE, + .maximum = MAX_SLICE_BYTE_SIZE, + .default_value = MIN_SLICE_BYTE_SIZE, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, + .name = "Slice MB Size", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 1, + .maximum = MAX_SLICE_MB_SIZE, + .default_value = 1, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM, + .name = "Random Intra Refresh MBs", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = MAX_INTRA_REFRESH_MBS, + .default_value = 0, + .step = 1, + .menu_skip_mask = 0, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, + .name = "Cyclic Intra Refresh MBs", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + 
.maximum = MAX_INTRA_REFRESH_MBS, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, + .name = "H.264 Loop Filter Alpha Offset", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = -6, + .maximum = 6, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, + .name = "H.264 Loop Filter Beta Offset", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = -6, + .maximum = 6, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, + .name = "H.264 Loop Filter Mode", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED, + .maximum = DB_DISABLE_SLICE_BOUNDARY, + .default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED) | + (1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED) | + (1 << DB_DISABLE_SLICE_BOUNDARY) + ), + }, + { + .id = V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR, + .name = "Prepend SPS/PPS to IDR", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE, + .name = "Secure mode", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA, + .name = "Extradata Type", + .type = V4L2_CTRL_TYPE_BITMASK, + .minimum = EXTRADATA_NONE, + .maximum = EXTRADATA_ADVANCED | EXTRADATA_ENC_INPUT_ROI | + EXTRADATA_ENC_INPUT_HDR10PLUS | + EXTRADATA_ENC_INPUT_CROP | + EXTRADATA_ENC_INPUT_CVP | EXTRADATA_ENC_FRAME_QP, + .default_value = EXTRADATA_NONE, + .menu_skip_mask = 0, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_VUI_TIMING_INFO, + .name = "H264 VUI 
Timing Info", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER, + .name = "AU Delimiter", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .step = 1, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME, + .name = "H264 Use LTR", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = ((1 << MAX_LTR_FRAME_COUNT) - 1), + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT, + .name = "Ltr Count", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = MAX_LTR_FRAME_COUNT, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME, + .name = "H264 Mark LTR", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = (MAX_LTR_FRAME_COUNT - 1), + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER, + .name = "Set Hier layers", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = MAX_HIER_CODING_LAYER, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER, + .name = "Set Hier max layers", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0, + .maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_6, + .default_value = + V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0, + .step = 1, + .menu_skip_mask = 0, + }, + { + .id = V4L2_CID_MPEG_VIDC_VENC_COMPLEXITY, + .name = "Encoder complexity", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 100, + .default_value = 100, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE, + .name = "Set Hier 
coding type", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B, + .maximum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P, + .default_value = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B) | + (1 << V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P) + ), + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP, + .name = "Set layer0 QP", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 51, + .default_value = 51, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP, + .name = "Set layer1 QP", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 51, + .default_value = 51, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP, + .name = "Set layer2 QP", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 51, + .default_value = 51, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP, + .name = "Set layer3 QP", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 51, + .default_value = 51, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP, + .name = "Set layer4 QP", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 51, + .default_value = 51, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP, + .name = "Set layer5 QP", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 51, + .default_value = 51, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR, + .name = "Set layer0 BR", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_BIT_RATE_RATIO, + .maximum = MAX_BIT_RATE_RATIO, + .default_value = MIN_BIT_RATE_RATIO, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR, + .name = "Set layer1 BR", + .type = V4L2_CTRL_TYPE_INTEGER, 
+ .minimum = MIN_BIT_RATE_RATIO, + .maximum = MAX_BIT_RATE_RATIO, + .default_value = MIN_BIT_RATE_RATIO, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR, + .name = "Set layer2 BR", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_BIT_RATE_RATIO, + .maximum = MAX_BIT_RATE_RATIO, + .default_value = MIN_BIT_RATE_RATIO, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR, + .name = "Set layer3 BR", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_BIT_RATE_RATIO, + .maximum = MAX_BIT_RATE_RATIO, + .default_value = MIN_BIT_RATE_RATIO, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR, + .name = "Set layer4 BR", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_BIT_RATE_RATIO, + .maximum = MAX_BIT_RATE_RATIO, + .default_value = MIN_BIT_RATE_RATIO, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR, + .name = "Set layer5 BR", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_BIT_RATE_RATIO, + .maximum = MAX_BIT_RATE_RATIO, + .default_value = MIN_BIT_RATE_RATIO, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID, + .name = "Set Base Layer Priority ID for Hier-P", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = MAX_BASE_LAYER_PRIORITY_ID, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH, + .name = "SAR Width", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 7680, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT, + .name = "SAR Height", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 7680, + .default_value = 0, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY, + .name = "Session Priority", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = 
V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_ENABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE, + .name = "Encoder Operating rate", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = (DEFAULT_FPS << 16),/* Power Vote min fps */ + .maximum = INT_MAX, + .default_value = (DEFAULT_FPS << 16), + .step = 1, + .menu_skip_mask = 0, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC, + .name = "Set VPE Color space conversion coefficients", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE, + .name = "Low Latency Mode", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_BLUR_DIMENSIONS, + .name = "Set Blur width/height", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = S32_MAX, + .default_value = 0, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, + .name = "Transform 8x8", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_ENABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE, + .name = "Set Color space", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MSM_VIDC_RESERVED_1, + .maximum = MSM_VIDC_BT2020, + .default_value = MSM_VIDC_RESERVED_1, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE, + .name = "Set Color space range", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS, + .name = "Set Color space 
transfer characterstics", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MSM_VIDC_TRANSFER_BT709_5, + .maximum = MSM_VIDC_TRANSFER_HLG, + .default_value = MSM_VIDC_TRANSFER_601_6_625, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS, + .name = "Set Color space matrix coefficients", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MSM_VIDC_MATRIX_BT_709_5, + .maximum = MSM_VIDC_MATRIX_BT_2020_CONST, + .default_value = MSM_VIDC_MATRIX_601_6_625, + .step = 1, + .qmenu = NULL, + }, + { + .id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, + .name = "Frame Rate based Rate Control", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = 0, + .maximum = 1, + .default_value = 1, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE, + .name = "RC Timestamp disable", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_CUSTOM_MATRIX, + .name = "Enable/Disable CSC Custom Matrix", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_HFLIP, + .name = "Enable/Disable Horizontal Flip", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_VFLIP, + .name = "Enable/Disable Vertical Flip", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VENC_HDR_INFO, + .name = "HDR PQ information", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = INT_MIN, + .maximum = INT_MAX, + .default_value = 0, + .step = 1, + }, + { + .id = 
V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD, + .name = "NAL Format", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_MPEG_VIDEO_HEVC_SIZE_0, + .maximum = V4L2_MPEG_VIDEO_HEVC_SIZE_4, + .default_value = V4L2_MPEG_VIDEO_HEVC_SIZE_0, + .menu_skip_mask = ~( + (1 << V4L2_MPEG_VIDEO_HEVC_SIZE_0) | + (1 << V4L2_MPEG_VIDEO_HEVC_SIZE_4) + ), + .qmenu = mpeg_video_stream_format, + }, + { + .id = V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, + .name = "Chroma QP Index Offset", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = MIN_CHROMA_QP_OFFSET, + .maximum = INT_MAX, + .default_value = INT_MAX, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER, + .name = "Enable/Disable Native Recorder", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_MSM_VIDC_DISABLE, + .maximum = V4L2_MPEG_MSM_VIDC_ENABLE, + .default_value = V4L2_MPEG_MSM_VIDC_DISABLE, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS, + .name = "Enable/Disable bitrate savings", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = V4L2_MPEG_VIDC_VIDEO_BRS_DISABLE, + .maximum = V4L2_MPEG_VIDC_VIDEO_BRS_ENABLE_ALL, + .default_value = V4L2_MPEG_VIDC_VIDEO_BRS_ENABLE_ALL, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VENC_BITRATE_BOOST, + .name = "Bitrate boost margin", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 100, + .default_value = 25, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VENC_QPRANGE_BOOST, + .name = "Bitrate boost QP range", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = MAX_QPRANGE_BOOST, + .default_value = 0, + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDEO_VBV_DELAY, + .name = "Set Vbv Delay", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = 1000, + .default_value = 0, + .step = 500, + }, + { + .id = V4L2_CID_MPEG_VIDC_SUPERFRAME, + .name = "Superframe", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = 0, + .maximum = VIDC_SUPERFRAME_MAX, + .default_value = 0, + .step = 1, + }, + { + .id = 
V4L2_CID_MPEG_VIDC_CAPTURE_FRAME_RATE, + .name = "Capture Frame Rate", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = (MINIMUM_FPS << 16), + .maximum = (MAXIMUM_FPS << 16), + .default_value = (DEFAULT_FPS << 16), + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_CVP_FRAME_RATE, + .name = "CVP Frame Rate", + .type = V4L2_CTRL_TYPE_INTEGER, + .minimum = (MINIMUM_FPS << 16), + .maximum = (MAXIMUM_FPS << 16), + .default_value = (DEFAULT_FPS << 16), + .step = 1, + }, + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE, + .name = "ROI Type", + .type = V4L2_CTRL_TYPE_MENU, + .minimum = V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_NONE, + .maximum = V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE, + .default_value = V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_NONE, + .menu_skip_mask = ~( + (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_NONE) | + (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BIT) | + (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE) + ), + .qmenu = roi_map_type, + }, +}; + +#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls) + +static struct msm_vidc_format_desc venc_input_formats[] = { + { + .name = "YCbCr Semiplanar 4:2:0", + .description = "Y/CbCr 4:2:0", + .fourcc = V4L2_PIX_FMT_NV12, + }, + { + .name = "UBWC YCbCr Semiplanar 4:2:0", + .description = "UBWC Y/CbCr 4:2:0", + .fourcc = V4L2_PIX_FMT_NV12_UBWC, + }, + { + .name = "YCrCb Semiplanar 4:2:0", + .description = "Y/CrCb 4:2:0", + .fourcc = V4L2_PIX_FMT_NV21, + }, + { + .name = "TP10 UBWC 4:2:0", + .description = "TP10 UBWC 4:2:0", + .fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC, + }, + { + .name = "YCbCr Semiplanar 4:2:0 10bit", + .description = "Y/CbCr 4:2:0 10bit", + .fourcc = V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS, + }, + { + .name = "YCbCr Semiplanar 4:2:0 128 aligned", + .description = "Y/CbCr 4:2:0 128 aligned", + .fourcc = V4L2_PIX_FMT_NV12_128, + }, + { + .name = "YCbCr Semiplanar 4:2:0 512 aligned", + .description = "Y/CbCr 4:2:0 512 aligned", + .fourcc = V4L2_PIX_FMT_NV12_512, + }, + { + .name = "32bit RGBA UBWC 8:8:8:8", + .description = "32-bit RGBA 
UBWC 8:8:8:8", + .fourcc = V4L2_PIX_FMT_RGBA8888_UBWC, + }, +}; + +static struct msm_vidc_format_desc venc_output_formats[] = { + { + .name = "H264", + .description = "H264 compressed format", + .fourcc = V4L2_PIX_FMT_H264, + }, + { + .name = "HEVC", + .description = "HEVC compressed format", + .fourcc = V4L2_PIX_FMT_HEVC, + }, +}; + +struct msm_vidc_format_constraint enc_pix_format_constraints[] = { + { + .fourcc = V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS, + .num_planes = 2, + .y_max_stride = 8192, + .y_buffer_alignment = 256, + .uv_max_stride = 8192, + .uv_buffer_alignment = 256, + }, + { + .fourcc = V4L2_PIX_FMT_NV12_128, + .num_planes = 2, + .y_max_stride = 8192, + .y_buffer_alignment = 128, + .uv_max_stride = 8192, + .uv_buffer_alignment = 32, + }, + { + .fourcc = V4L2_PIX_FMT_NV12_512, + .num_planes = 2, + .y_max_stride = 16384, + .y_buffer_alignment = 512, + .uv_max_stride = 16384, + .uv_buffer_alignment = 256, + }, + { + .fourcc = V4L2_PIX_FMT_NV12, + .num_planes = 2, + .y_max_stride = 16384, + .y_buffer_alignment = 512, + .uv_max_stride = 16384, + .uv_buffer_alignment = 256, + }, + { + .fourcc = V4L2_PIX_FMT_NV21, + .num_planes = 2, + .y_max_stride = 8192, + .y_buffer_alignment = 512, + .uv_max_stride = 8192, + .uv_buffer_alignment = 256, + }, +}; + +u32 v4l2_to_hfi_flip(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *hflip = NULL; + struct v4l2_ctrl *vflip = NULL; + u32 flip = HFI_FLIP_NONE; + + hflip = get_ctrl(inst, V4L2_CID_HFLIP); + vflip = get_ctrl(inst, V4L2_CID_VFLIP); + + if ((hflip->val == V4L2_MPEG_MSM_VIDC_ENABLE) && + (vflip->val == V4L2_MPEG_MSM_VIDC_ENABLE)) + flip = HFI_FLIP_HORIZONTAL | HFI_FLIP_VERTICAL; + else if (hflip->val == V4L2_MPEG_MSM_VIDC_ENABLE) + flip = HFI_FLIP_HORIZONTAL; + else if (vflip->val == V4L2_MPEG_MSM_VIDC_ENABLE) + flip = HFI_FLIP_VERTICAL; + + return flip; +} + +static int msm_venc_set_csc(struct msm_vidc_inst *inst, + u32 color_primaries, u32 custom_matrix); + +int msm_venc_inst_init(struct msm_vidc_inst *inst) 
+{ + int rc = 0; + struct msm_vidc_format_desc *fmt_desc = NULL; + struct v4l2_format *f = NULL; + uint32_t vpu; + + if (!inst) { + d_vpr_e("Invalid input = %pK\n", inst); + return -EINVAL; + } + vpu = inst->core->platform_data->vpu_ver; + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + f->type = OUTPUT_MPLANE; + f->fmt.pix_mp.height = DEFAULT_HEIGHT; + f->fmt.pix_mp.width = DEFAULT_WIDTH; + f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264; + f->fmt.pix_mp.num_planes = 1; + f->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_enc_output_frame_size(inst); + fmt_desc = msm_comm_get_pixel_fmt_fourcc(venc_output_formats, + ARRAY_SIZE(venc_output_formats), + f->fmt.pix_mp.pixelformat, inst->sid); + if (!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set : %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(inst->fmts[OUTPUT_PORT].name, fmt_desc->name, + sizeof(inst->fmts[OUTPUT_PORT].name)); + strlcpy(inst->fmts[OUTPUT_PORT].description, fmt_desc->description, + sizeof(inst->fmts[OUTPUT_PORT].description)); + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + f->type = INPUT_MPLANE; + f->fmt.pix_mp.height = DEFAULT_HEIGHT; + f->fmt.pix_mp.width = DEFAULT_WIDTH; + f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12_UBWC; + f->fmt.pix_mp.num_planes = 1; + if (vpu == VPU_VERSION_IRIS2) + f->fmt.pix_mp.num_planes = 2; + f->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_enc_input_frame_size(inst); + f->fmt.pix_mp.plane_fmt[1].sizeimage = + msm_vidc_calculate_enc_input_extra_size(inst); + fmt_desc = msm_comm_get_pixel_fmt_fourcc(venc_input_formats, + ARRAY_SIZE(venc_input_formats), f->fmt.pix_mp.pixelformat, + inst->sid); + if (!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set : %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(inst->fmts[INPUT_PORT].name, fmt_desc->name, + sizeof(inst->fmts[INPUT_PORT].name)); + strlcpy(inst->fmts[INPUT_PORT].description, fmt_desc->description, + sizeof(inst->fmts[INPUT_PORT].description)); + 
inst->prop.extradata_ctrls = EXTRADATA_NONE; + inst->buffer_mode_set[INPUT_PORT] = HAL_BUFFER_MODE_DYNAMIC; + inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC; + inst->clk_data.frame_rate = (DEFAULT_FPS << 16); + + inst->clk_data.operating_rate = (DEFAULT_FPS << 16); + inst->clk_data.is_legacy_cbr = false; + + inst->buff_req.buffer[1].buffer_type = HAL_BUFFER_INPUT; + inst->buff_req.buffer[1].buffer_count_min_host = + inst->buff_req.buffer[1].buffer_count_actual = + MIN_NUM_ENC_OUTPUT_BUFFERS; + inst->buff_req.buffer[2].buffer_type = HAL_BUFFER_OUTPUT; + inst->buff_req.buffer[2].buffer_count_min_host = + inst->buff_req.buffer[2].buffer_count_actual = + MIN_NUM_ENC_CAPTURE_BUFFERS; + inst->buff_req.buffer[3].buffer_type = HAL_BUFFER_OUTPUT2; + inst->buff_req.buffer[3].buffer_count_min_host = + inst->buff_req.buffer[3].buffer_count_actual = + MIN_NUM_ENC_CAPTURE_BUFFERS; + inst->buff_req.buffer[4].buffer_type = HAL_BUFFER_EXTRADATA_INPUT; + inst->buff_req.buffer[5].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT; + inst->buff_req.buffer[6].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT2; + inst->buff_req.buffer[7].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH; + inst->buff_req.buffer[8].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_1; + inst->buff_req.buffer[9].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_2; + inst->buff_req.buffer[10].buffer_type = HAL_BUFFER_INTERNAL_PERSIST; + inst->buff_req.buffer[11].buffer_type = HAL_BUFFER_INTERNAL_PERSIST_1; + inst->buff_req.buffer[12].buffer_type = HAL_BUFFER_INTERNAL_CMD_QUEUE; + inst->buff_req.buffer[13].buffer_type = HAL_BUFFER_INTERNAL_RECON; + msm_vidc_init_buffer_size_calculators(inst); + inst->static_rotation_flip_enabled = false; + inst->external_blur = false; + inst->hdr10_sei_enabled = false; + return rc; +} + +int msm_venc_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f) +{ + const struct msm_vidc_format_desc *fmt_desc = NULL; + int rc = 0; + + if (!inst || !f) { + d_vpr_e("Invalid input, inst = %pK, f = %pK\n", 
inst, f); + return -EINVAL; + } + if (f->type == OUTPUT_MPLANE) { + fmt_desc = msm_comm_get_pixel_fmt_index(venc_output_formats, + ARRAY_SIZE(venc_output_formats), f->index, inst->sid); + f->flags = V4L2_FMT_FLAG_COMPRESSED; + } else if (f->type == INPUT_MPLANE) { + fmt_desc = msm_comm_get_pixel_fmt_index(venc_input_formats, + ARRAY_SIZE(venc_input_formats), f->index, inst->sid); + } + + memset(f->reserved, 0, sizeof(f->reserved)); + if (fmt_desc) { + strlcpy(f->description, fmt_desc->description, + sizeof(f->description)); + f->pixelformat = fmt_desc->fourcc; + } else { + s_vpr_h(inst->sid, "No more formats found\n"); + rc = -EINVAL; + } + return rc; +} + +static int msm_venc_set_csc(struct msm_vidc_inst *inst, + u32 color_primaries, u32 custom_matrix) +{ + int rc = 0; + int count = 0; + struct hfi_vpe_color_space_conversion vpe_csc; + struct msm_vidc_platform_resources *resources; + u32 *bias_coeff = NULL; + u32 *csc_limit = NULL; + u32 *csc_matrix = NULL; + struct hfi_device *hdev; + + hdev = inst->core->device; + resources = &(inst->core->resources); + bias_coeff = + resources->csc_coeff_data->vpe_csc_custom_bias_coeff; + csc_limit = + resources->csc_coeff_data->vpe_csc_custom_limit_coeff; + csc_matrix = + resources->csc_coeff_data->vpe_csc_custom_matrix_coeff; + + vpe_csc.input_color_primaries = color_primaries; + /* Custom bias, matrix & limit */ + vpe_csc.custom_matrix_enabled = custom_matrix ? 
7 : 0; + + if (vpe_csc.custom_matrix_enabled && bias_coeff != NULL + && csc_limit != NULL && csc_matrix != NULL) { + while (count < HAL_MAX_MATRIX_COEFFS) { + if (count < HAL_MAX_BIAS_COEFFS) + vpe_csc.csc_bias[count] = + bias_coeff[count]; + if (count < HAL_MAX_LIMIT_COEFFS) + vpe_csc.csc_limit[count] = + csc_limit[count]; + vpe_csc.csc_matrix[count] = + csc_matrix[count]; + count = count + 1; + } + } + + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION, + &vpe_csc, sizeof(vpe_csc)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + return rc; +} + +int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) +{ + int rc = 0; + struct msm_vidc_format *fmt = NULL; + struct msm_vidc_format_desc *fmt_desc = NULL; + struct v4l2_pix_format_mplane *mplane = NULL; + u32 color_format; + + if (!inst || !f) { + d_vpr_e("Invalid input, inst = %pK, format = %pK\n", inst, f); + return -EINVAL; + } + + /* + * First update inst format with new width/height/format + * Recalculate sizes/strides etc + * Perform necessary checks to continue with session + * Copy recalculated info into user format + */ + if (f->type == OUTPUT_MPLANE) { + fmt = &inst->fmts[OUTPUT_PORT]; + fmt_desc = msm_comm_get_pixel_fmt_fourcc(venc_output_formats, + ARRAY_SIZE(venc_output_formats), + f->fmt.pix_mp.pixelformat, inst->sid); + if (!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set : %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(fmt->name, fmt_desc->name, sizeof(fmt->name)); + strlcpy(fmt->description, fmt_desc->description, + sizeof(fmt->description)); + + fmt->v4l2_fmt.type = f->type; + mplane = &fmt->v4l2_fmt.fmt.pix_mp; + mplane->width = f->fmt.pix_mp.width; + mplane->height = f->fmt.pix_mp.height; + mplane->pixelformat = f->fmt.pix_mp.pixelformat; + + if (!inst->profile) { + rc = msm_venc_set_default_profile(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: Failed to set default profile 
type\n", + __func__); + goto exit; + } + } + + rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE); + if (rc) { + s_vpr_e(inst->sid, "Failed to open instance\n"); + goto exit; + } + + mplane->plane_fmt[0].sizeimage = + msm_vidc_calculate_enc_output_frame_size(inst); + if (mplane->num_planes > 1) + mplane->plane_fmt[1].sizeimage = + msm_vidc_calculate_enc_output_extra_size(inst); + + rc = msm_vidc_check_session_supported(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: session not supported\n", __func__); + goto exit; + } + update_log_ctxt(inst->sid, inst->session_type, + mplane->pixelformat); + memcpy(f, &fmt->v4l2_fmt, sizeof(struct v4l2_format)); + } else if (f->type == INPUT_MPLANE) { + fmt = &inst->fmts[INPUT_PORT]; + fmt_desc = msm_comm_get_pixel_fmt_fourcc(venc_input_formats, + ARRAY_SIZE(venc_input_formats), + f->fmt.pix_mp.pixelformat, inst->sid); + if (!fmt_desc) { + s_vpr_e(inst->sid, "Invalid fmt set : %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + strlcpy(fmt->name, fmt_desc->name, sizeof(fmt->name)); + strlcpy(fmt->description, fmt_desc->description, + sizeof(fmt->description)); + + inst->clk_data.opb_fourcc = f->fmt.pix_mp.pixelformat; + + fmt->v4l2_fmt.type = f->type; + mplane = &fmt->v4l2_fmt.fmt.pix_mp; + mplane->width = f->fmt.pix_mp.width; + mplane->height = f->fmt.pix_mp.height; + mplane->pixelformat = f->fmt.pix_mp.pixelformat; + mplane->plane_fmt[0].sizeimage = + msm_vidc_calculate_enc_input_frame_size(inst); + if (mplane->num_planes > 1) + mplane->plane_fmt[1].sizeimage = + msm_vidc_calculate_enc_input_extra_size(inst); + color_format = msm_comm_convert_color_fmt( + f->fmt.pix_mp.pixelformat, inst->sid); + mplane->plane_fmt[0].bytesperline = + VENUS_Y_STRIDE(color_format, f->fmt.pix_mp.width); + mplane->plane_fmt[0].reserved[0] = + VENUS_Y_SCANLINES(color_format, f->fmt.pix_mp.height); + inst->bit_depth = MSM_VIDC_BIT_DEPTH_8; + if ((f->fmt.pix_mp.pixelformat == + V4L2_PIX_FMT_NV12_TP10_UBWC) || + (f->fmt.pix_mp.pixelformat == + 
V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS)) { + inst->bit_depth = MSM_VIDC_BIT_DEPTH_10; + } + + rc = msm_vidc_calculate_buffer_counts(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s failed to calculate buffer count\n", + __func__); + return rc; + } + + rc = msm_vidc_check_session_supported(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: session not supported\n", __func__); + goto exit; + } + + memcpy(f, &fmt->v4l2_fmt, sizeof(struct v4l2_format)); + } else { + s_vpr_e(inst->sid, "%s: Unsupported buf type: %d\n", + __func__, f->type); + rc = -EINVAL; + goto exit; + } +exit: + return rc; +} + +int msm_venc_set_default_profile(struct msm_vidc_inst *inst) +{ + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + if (get_v4l2_codec(inst) == V4L2_PIX_FMT_HEVC) + inst->profile = HFI_HEVC_PROFILE_MAIN; + else if (get_v4l2_codec(inst) == V4L2_PIX_FMT_H264) + inst->profile = HFI_H264_PROFILE_HIGH; + else + s_vpr_e(inst->sid, "%s: Invalid codec type %#x\n", + __func__, get_v4l2_codec(inst)); + return 0; +} + +int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) +{ + struct v4l2_format *fmt; + + if (f->type == OUTPUT_MPLANE) { + fmt = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + fmt->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_enc_output_frame_size(inst); + if (fmt->fmt.pix_mp.num_planes > 1) + fmt->fmt.pix_mp.plane_fmt[1].sizeimage = + msm_vidc_calculate_enc_output_extra_size(inst); + memcpy(f, fmt, sizeof(struct v4l2_format)); + } else if (f->type == INPUT_MPLANE) { + fmt = &inst->fmts[INPUT_PORT].v4l2_fmt; + fmt->fmt.pix_mp.plane_fmt[0].sizeimage = + msm_vidc_calculate_enc_input_frame_size(inst); + if (fmt->fmt.pix_mp.num_planes > 1) { + fmt->fmt.pix_mp.plane_fmt[1].sizeimage = + msm_vidc_calculate_enc_input_extra_size(inst); + } + memcpy(f, fmt, sizeof(struct v4l2_format)); + } else { + s_vpr_e(inst->sid, "%s: Unsupported buf type: %d\n", + __func__, f->type); + return -EINVAL; + } + + return 0; +} + +int 
msm_venc_ctrl_init(struct msm_vidc_inst *inst, + const struct v4l2_ctrl_ops *ctrl_ops) +{ + return msm_comm_ctrl_init(inst, msm_venc_ctrls, + ARRAY_SIZE(msm_venc_ctrls), ctrl_ops); +} + +static int msm_venc_resolve_rc_enable(struct msm_vidc_inst *inst, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl *rc_mode; + u32 codec; + + if (!ctrl->val) { + s_vpr_h(inst->sid, "RC is not enabled. Setting RC OFF\n"); + inst->rc_type = RATE_CONTROL_OFF; + } else { + rc_mode = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE_MODE); + inst->rc_type = rc_mode->val; + } + + codec = get_v4l2_codec(inst); + if (msm_vidc_lossless_encode + && (codec == V4L2_PIX_FMT_HEVC || + codec == V4L2_PIX_FMT_H264)) { + s_vpr_h(inst->sid, + "Reset RC mode to RC_LOSSLESS for HEVC lossless encoding\n"); + inst->rc_type = RATE_CONTROL_LOSSLESS; + } + return 0; +} + +static int msm_venc_resolve_rate_control(struct msm_vidc_inst *inst, + struct v4l2_ctrl *ctrl) +{ + if (inst->rc_type == RATE_CONTROL_LOSSLESS) { + s_vpr_h(inst->sid, + "Skip RC mode when enabling lossless encoding\n"); + return 0; + } + + if (inst->rc_type == RATE_CONTROL_OFF) { + s_vpr_e(inst->sid, "RC is not enabled.\n"); + return -EINVAL; + } + + if ((ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) && + get_v4l2_codec(inst) != V4L2_PIX_FMT_HEVC) { + s_vpr_e(inst->sid, "CQ supported only for HEVC\n"); + return -EINVAL; + } + inst->rc_type = ctrl->val; + return 0; +} + +static int msm_venc_update_bitrate(struct msm_vidc_inst *inst) +{ + u32 cabac_max_bitrate = 0; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + if (get_v4l2_codec(inst) == V4L2_PIX_FMT_H264) { + cabac_max_bitrate = inst->capability.cap[CAP_CABAC_BITRATE].max; + if ((inst->clk_data.bitrate > cabac_max_bitrate) && + (inst->entropy_mode == HFI_H264_ENTROPY_CABAC)) { + s_vpr_h(inst->sid, + "%s: update bitrate %u to max allowed cabac bitrate %u\n", + __func__, inst->clk_data.bitrate, + cabac_max_bitrate); + inst->clk_data.bitrate = 
cabac_max_bitrate; + } + } + return 0; +} + +int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) +{ + int rc = 0; + struct msm_vidc_mastering_display_colour_sei_payload *mdisp_sei = NULL; + struct msm_vidc_content_light_level_sei_payload *cll_sei = NULL; + u32 i_qp_min, i_qp_max, p_qp_min, p_qp_max, b_qp_min, b_qp_max; + struct v4l2_format *f; + u32 codec; + u32 sid; + + if (!inst || !inst->core || !inst->core->device || !ctrl) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + mdisp_sei = &(inst->hdr10_sei_params.disp_color_sei); + cll_sei = &(inst->hdr10_sei_params.cll_sei); + codec = get_v4l2_codec(inst); + sid = inst->sid; + + s_vpr_h(sid, "%s: name %s, id 0x%x value %d\n", + __func__, ctrl->name, ctrl->id, ctrl->val); + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: + if (inst->state == MSM_VIDC_START_DONE) { + if (inst->all_intra) { + s_vpr_h(sid, + "%s: ignore dynamic gop size for all intra\n", + __func__); + break; + } + rc = msm_venc_set_intra_period(inst); + if (rc) + s_vpr_e(sid, "%s: set intra period failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_request_keyframe(inst); + if (rc) + s_vpr_e(sid, "%s: set bitrate failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + { + rc = msm_venc_resolve_rate_control(inst, ctrl); + if (rc) + s_vpr_e(sid, "%s: set bitrate mode failed\n", __func__); + if (inst->state < MSM_VIDC_LOAD_RESOURCES) + msm_vidc_calculate_buffer_counts(inst); + break; + } + case V4L2_CID_MPEG_VIDEO_BITRATE: + inst->clk_data.bitrate = ctrl->val; + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_update_bitrate(inst); + if (rc) + s_vpr_e(sid, "%s: Update bitrate failed\n", + __func__); + rc = msm_venc_set_bitrate(inst); + if (rc) + s_vpr_e(sid, "%s: set bitrate failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE: + 
inst->clk_data.frame_rate = ctrl->val; + /* For HEIC image encode, set fps to 1 */ + if (is_grid_session(inst)) { + s_vpr_h(sid, "%s: set fps to 1 for HEIC\n", + __func__); + inst->clk_data.frame_rate = 1 << 16; + } + if (inst->state < MSM_VIDC_LOAD_RESOURCES) + msm_vidc_calculate_buffer_counts(inst); + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_frame_rate(inst, true); + if (rc) + s_vpr_e(sid, "%s: set frame rate failed\n", + __func__); + msm_comm_release_timestamps(inst); + } + break; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: + if (codec != V4L2_PIX_FMT_HEVC && codec != V4L2_PIX_FMT_H264) { + s_vpr_e(sid, + "Slice mode not supported for encoder %#x\n", + codec); + rc = -ENOTSUPP; + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_SECURE: + inst->flags &= ~VIDC_SECURE; + if (ctrl->val) + inst->flags |= VIDC_SECURE; + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + f->fmt.pix_mp.num_planes = 1; + s_vpr_h(sid, "%s: num planes %d for secure sessions\n", + __func__, f->fmt.pix_mp.num_planes); + break; + case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME: + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_ltr_useframe(inst); + if (rc) + s_vpr_e(sid, "%s: ltr useframe failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME: + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_ltr_markframe(inst); + if (rc) + s_vpr_e(sid, "%s: ltr markframe failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE: + inst->flags &= ~VIDC_TURBO; + if (ctrl->val == INT_MAX) + inst->flags |= VIDC_TURBO; + else + inst->clk_data.operating_rate = ctrl->val; + /* For HEIC image encode, set operating rate to 1 */ + if (is_grid_session(inst)) { + s_vpr_h(sid, "%s: set operating rate to 1 for HEIC\n", + __func__); + inst->clk_data.operating_rate = 1 << 16; + } + if (inst->state < MSM_VIDC_LOAD_RESOURCES) + 
msm_vidc_calculate_buffer_counts(inst); + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_operating_rate(inst); + if (rc) + s_vpr_e(sid, "%s: set operating rate failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE: + inst->clk_data.low_latency_mode = !!ctrl->val; + break; + case V4L2_CID_MPEG_VIDC_VENC_HDR_INFO: { + u32 info_type = ((u32)ctrl->val >> 28) & 0xF; + u32 val = (ctrl->val & 0xFFFFFFF); + + inst->hdr10_sei_enabled = true; + s_vpr_h(sid, "Ctrl:%d, HDR Info with value %u (%#X)", + info_type, val, ctrl->val); + switch (info_type) { + case MSM_VIDC_RGB_PRIMARY_00: + mdisp_sei->nDisplayPrimariesX[0] = val; + break; + case MSM_VIDC_RGB_PRIMARY_01: + mdisp_sei->nDisplayPrimariesY[0] = val; + break; + case MSM_VIDC_RGB_PRIMARY_10: + mdisp_sei->nDisplayPrimariesX[1] = val; + break; + case MSM_VIDC_RGB_PRIMARY_11: + mdisp_sei->nDisplayPrimariesY[1] = val; + break; + case MSM_VIDC_RGB_PRIMARY_20: + mdisp_sei->nDisplayPrimariesX[2] = val; + break; + case MSM_VIDC_RGB_PRIMARY_21: + mdisp_sei->nDisplayPrimariesY[2] = val; + break; + case MSM_VIDC_WHITEPOINT_X: + mdisp_sei->nWhitePointX = val; + break; + case MSM_VIDC_WHITEPOINT_Y: + mdisp_sei->nWhitePointY = val; + break; + case MSM_VIDC_MAX_DISP_LUM: + mdisp_sei->nMaxDisplayMasteringLuminance = val; + break; + case MSM_VIDC_MIN_DISP_LUM: + mdisp_sei->nMinDisplayMasteringLuminance = val; + break; + case MSM_VIDC_RGB_MAX_CLL: + cll_sei->nMaxContentLight = val; + break; + case MSM_VIDC_RGB_MAX_FLL: + cll_sei->nMaxPicAverageLight = val; + break; + default: + s_vpr_e(sid, + "Unknown Ctrl:%d, not part of HDR Info with value %u", + info_type, val); + } + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA: + if (ctrl->val == EXTRADATA_NONE) + inst->prop.extradata_ctrls = 0; + else + inst->prop.extradata_ctrls |= ctrl->val; + + if ((inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_ROI) || + (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_HDR10PLUS) || + 
(inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_CROP)) { + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + f->fmt.pix_mp.num_planes = 2; + f->fmt.pix_mp.plane_fmt[1].sizeimage = + msm_vidc_calculate_enc_input_extra_size(inst); + } + + if ((inst->prop.extradata_ctrls & EXTRADATA_ADVANCED) || + (inst->prop.extradata_ctrls & EXTRADATA_ENC_FRAME_QP)) { + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + f->fmt.pix_mp.num_planes = 2; + f->fmt.pix_mp.plane_fmt[1].sizeimage = + msm_vidc_calculate_enc_output_extra_size(inst); + } + + break; + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: + rc = msm_venc_resolve_rc_enable(inst, ctrl); + if (rc) + s_vpr_e(sid, "%s: set rc enable failed\n", __func__); + break; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + inst->profile = msm_comm_v4l2_to_hfi(ctrl->id, ctrl->val, sid); + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + inst->level = msm_comm_v4l2_to_hfi(ctrl->id, ctrl->val, sid); + break; + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + inst->level |= + (msm_comm_v4l2_to_hfi(ctrl->id, ctrl->val, sid) << 28); + break; + case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: + i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min; + i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max; + p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min; + p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max; + b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min; + b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max; + if ((ctrl->val & 0xff) < i_qp_min || + ((ctrl->val >> 8) & 0xff) < p_qp_min || + ((ctrl->val >> 16) & 0xff) < b_qp_min || + (ctrl->val & 0xff) > i_qp_max || + ((ctrl->val >> 8) & 0xff) > p_qp_max || + ((ctrl->val >> 16) & 0xff) > b_qp_max) { + s_vpr_e(sid, "Invalid QP %#x\n", ctrl->val); + return -EINVAL; + } + if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP) + inst->client_set_ctrls |= CLIENT_SET_MIN_QP; + else + inst->client_set_ctrls |= CLIENT_SET_MAX_QP; + 
break; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: + i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min; + i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max; + if (ctrl->val < i_qp_min || ctrl->val > i_qp_max) { + s_vpr_e(sid, "Invalid I QP %#x\n", ctrl->val); + return -EINVAL; + } + inst->client_set_ctrls |= CLIENT_SET_I_QP; + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_dyn_qp(inst, ctrl); + if (rc) + s_vpr_e(sid, + "%s: setting dyn frame QP failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: + p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min; + p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max; + if (ctrl->val < p_qp_min || ctrl->val > p_qp_max) { + s_vpr_e(sid, "Invalid P QP %#x\n", ctrl->val); + return -EINVAL; + } + inst->client_set_ctrls |= CLIENT_SET_P_QP; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: + b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min; + b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max; + if (ctrl->val < b_qp_min || ctrl->val > b_qp_max) { + s_vpr_e(sid, "Invalid B QP %#x\n", ctrl->val); + return -EINVAL; + } + inst->client_set_ctrls |= CLIENT_SET_B_QP; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_hp_layer(inst); + if (rc) + s_vpr_e(sid, "%s: set dyn hp layer failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID: + if (inst->state == MSM_VIDC_START_DONE) { + rc = msm_venc_set_base_layer_priority_id(inst); + if (rc) + s_vpr_e(sid, "%s: set baselayer id failed\n", + __func__); + } + break; + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: + case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: + if (inst->state == MSM_VIDC_START_DONE) { + rc = 
msm_venc_set_layer_bitrate(inst);
			if (rc)
				s_vpr_e(sid, "%s: set layer bitrate failed\n",
					__func__);
		}
		break;
	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
		/* B-frame count may only be configured before streaming starts */
		if (inst->state == MSM_VIDC_START_DONE) {
			s_vpr_e(sid,
				"%s: Dynamic setting of Bframe is not supported\n",
				__func__);
			return -EINVAL;
		}
		break;
	case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_DIMENSIONS:
		/*
		 * Before START_DONE: remember that the client opted into
		 * external (client-driven) blur. After START_DONE: only a
		 * dynamic external-blur resolution change is honored.
		 */
		if (inst->state < MSM_VIDC_START_DONE) {
			if ((ctrl->val != MSM_VIDC_BLUR_INTERNAL) &&
				(ctrl->val != MSM_VIDC_BLUR_DISABLE)) {
				inst->external_blur = true;
			}
		} else if (inst->state == MSM_VIDC_START_DONE) {
			if (!inst->external_blur) {
				s_vpr_e(sid, "%s: external blur not enabled", __func__);
				break;
			}
			if (ctrl->val == MSM_VIDC_BLUR_EXTERNAL_DYNAMIC) {
				s_vpr_h(sid,
					"%s: external blur setting already enabled\n",
					__func__);
				break;
			} else if (ctrl->val == MSM_VIDC_BLUR_INTERNAL) {
				s_vpr_e(sid,
					"%s: cannot change to internal blur config dynamically\n",
					__func__);
				break;
			} else {
				rc = msm_venc_set_blur_resolution(inst);
				if (rc)
					s_vpr_e(sid,
						"%s: set blur resolution failed\n",
						__func__);
			}
		}
		break;
	case V4L2_CID_HFLIP:
	case V4L2_CID_VFLIP:
		/* Flip is only pushed to firmware once the session is running */
		if (inst->state == MSM_VIDC_START_DONE) {
			rc = msm_venc_set_dynamic_flip(inst);
			if (rc)
				s_vpr_e(sid, "%s: set flip failed\n", __func__);
		}
		break;
	case V4L2_CID_MPEG_VIDC_CVP_FRAME_RATE:
		if (inst->state == MSM_VIDC_START_DONE) {
			rc = msm_venc_set_cvp_skipratio(inst);
			if (rc)
				s_vpr_e(sid,
					"%s: set cvp skip ratio failed\n",
					__func__);
		}
		break;
	case V4L2_CID_MPEG_VIDC_COMPRESSION_QUALITY:
		if (inst->state == MSM_VIDC_START_DONE) {
			rc = msm_venc_set_frame_quality(inst);
			if (rc)
				s_vpr_e(sid,
					"%s: set frame quality failed\n",
					__func__);
		}
		break;
	case V4L2_CID_MPEG_VIDC_IMG_GRID_SIZE:
		/* For HEIC image encode, set fps to 1 */
		if (ctrl->val) {
			s_vpr_h(sid, "%s: set fps to 1 for HEIC\n",
				__func__);
			/* Q16 fixed point: 1 << 16 == 1 fps */
			inst->clk_data.frame_rate = 1 << 16;
			s_vpr_h(sid, "%s: set operating rate to 1 for HEIC\n",
				__func__);
			inst->clk_data.operating_rate = 1 << 16;
		}
		break;
	case V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE:
		inst->full_range = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDC_VENC_BITRATE_BOOST:
		inst->boost_enabled = true;
		break;
	case V4L2_CID_MPEG_VIDC_VENC_COMPLEXITY:
		/* Complexity is meaningful for non-realtime; just log for RT */
		if (is_realtime_session(inst)) {
			s_vpr_h(sid, "Client is setting complexity for RT session\n");
		}
		break;
	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
		/* Cache the HFI translation; sent to fw at stream start */
		inst->entropy_mode = msm_comm_v4l2_to_hfi(
			V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
			ctrl->val, inst->sid);
		break;
	/*
	 * Controls below are cached only; they are translated and sent to
	 * firmware later (at start or on the relevant set_* path).
	 */
	case V4L2_CID_MPEG_VIDC_CAPTURE_FRAME_RATE:
	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER:
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
	case V4L2_CID_ROTATE:
	case V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT:
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
	case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER:
	case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR:
	case V4L2_CID_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE:
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP:
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP:
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP:
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP:
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP:
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP:
	case V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE:
	case V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS:
	case V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS:
	case V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC:
	case V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_CUSTOM_MATRIX:
	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
	case V4L2_CID_MPEG_VIDC_VIDEO_VUI_TIMING_INFO:
	case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:
	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
	case V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY:
	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM:
	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
	case V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER:
	case V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE:
	case V4L2_CID_MPEG_VIDEO_VBV_DELAY:
	case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET:
	case V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS:
	case V4L2_CID_MPEG_VIDC_VENC_QPRANGE_BOOST:
	case V4L2_CID_MPEG_VIDC_SUPERFRAME:
		s_vpr_h(sid, "Control set: ID : 0x%x Val : %d\n",
			ctrl->id, ctrl->val);
		break;
	default:
		s_vpr_e(sid, "Unsupported index: 0x%x\n", ctrl->id);
		rc = -ENOTSUPP;
		break;
	}

	return rc;
}

/*
 * msm_venc_set_frame_size() - push input and output frame dimensions to
 * firmware (HFI_PROPERTY_PARAM_FRAME_SIZE).
 *
 * For HEIC grid sessions the output dimensions sent to firmware are the
 * fixed grid tile size rather than the client's full resolution.
 *
 * Return: 0 on success, -EINVAL on bad params, else the HFI error code.
 */
int msm_venc_set_frame_size(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_frame_size frame_sz;
	struct v4l2_format *f;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	f = &inst->fmts[INPUT_PORT].v4l2_fmt;
	frame_sz.buffer_type = HFI_BUFFER_INPUT;
	frame_sz.width = f->fmt.pix_mp.width;
	frame_sz.height = f->fmt.pix_mp.height;
	s_vpr_h(inst->sid, "%s: input %d %d\n", __func__,
		frame_sz.width, frame_sz.height);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_FRAME_SIZE, &frame_sz, sizeof(frame_sz));
	if (rc) {
		s_vpr_e(inst->sid, "%s: failed to set input frame size %d %d\n",
			__func__, frame_sz.width, frame_sz.height);
		return rc;
	}

	f = &inst->fmts[OUTPUT_PORT].v4l2_fmt;
	frame_sz.buffer_type = HFI_BUFFER_OUTPUT;
	frame_sz.width = f->fmt.pix_mp.width;
	frame_sz.height = f->fmt.pix_mp.height;
	/* firmware needs grid size in output where as
	 * client sends out full resolution in output port */
	if (is_grid_session(inst)) {
		frame_sz.width = frame_sz.height = HEIC_GRID_DIMENSION;
	}
	s_vpr_h(inst->sid, "%s: output %d %d\n", __func__,
		frame_sz.width, frame_sz.height);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_FRAME_SIZE, &frame_sz, sizeof(frame_sz));
	if (rc) {
		s_vpr_e(inst->sid,
			"%s: failed to set output frame size %d %d\n",
			__func__, frame_sz.width, frame_sz.height);
		return rc;
	}

	return rc;
}

/*
 * msm_venc_set_frame_rate() - validate and program the session frame rate.
 * @external_requested: true when the rate came from the client
 *                      (HFI_PROPERTY_CONFIG_FRAME_RATE); false for the
 *                      auto-detected rate (AUTO_FRAME_RATE property).
 *
 * All-intra sessions are capped by CAP_ALLINTRA_MAX_FPS, others by
 * CAP_FRAMERATE. Frame rate is Q16 fixed point (integer fps = val >> 16).
 *
 * Return: 0 on success, -EINVAL/-ENOTSUPP on validation failure, else the
 * HFI error code.
 */
int msm_venc_set_frame_rate(struct msm_vidc_inst *inst, bool external_requested)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_frame_rate frame_rate;
	struct msm_vidc_capability *capability;
	u32 fps_max;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;
	capability = &inst->capability;

	/* Check frame rate */
	if (inst->all_intra)
		fps_max = capability->cap[CAP_ALLINTRA_MAX_FPS].max;
	else
		fps_max = capability->cap[CAP_FRAMERATE].max;

	if (inst->clk_data.frame_rate >> 16 > fps_max) {
		s_vpr_e(inst->sid,
			"%s: Unsupported frame rate, fps %u, max_fps %u\n",
			__func__, inst->clk_data.frame_rate >> 16, fps_max);
		return -ENOTSUPP;
	}

	frame_rate.buffer_type = HFI_BUFFER_OUTPUT;
	frame_rate.frame_rate = inst->clk_data.frame_rate;

	s_vpr_h(inst->sid, "%s: %#x\n", __func__, frame_rate.frame_rate);

	if (external_requested) {
		rc = call_hfi_op(hdev, session_set_property,
			inst->session, HFI_PROPERTY_CONFIG_FRAME_RATE,
			&frame_rate, sizeof(frame_rate));
	} else {
		s_vpr_l(inst->sid, "Auto frame rate set");
		rc = call_hfi_op(hdev, session_set_property,
			inst->session, HFI_PROPERTY_CONFIG_VENC_AUTO_FRAME_RATE,
			&frame_rate, sizeof(frame_rate));
	}
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_store_timestamp() - record an input timestamp and derive the
 * instantaneous frame rate for auto-dynamic-fps.
 *
 * Only active for VBR sessions with the native-recorder control enabled
 * (and never for image sessions). Keeps a bounded sliding window of
 * timestamps; when the derived rate is identical for two consecutive
 * frames and differs from the current rate, the new rate (scaled by the
 * superframe factor, if any) is pushed to firmware via
 * msm_venc_set_frame_rate(inst, false).
 *
 * Return: 0 on success or when auto-fps is inactive; -EINVAL/-ENOMEM on
 * failure. Non-monotonic timestamps are rejected (logged, window kept).
 */
int msm_venc_store_timestamp(struct msm_vidc_inst *inst, u64 timestamp_us)
{
	struct msm_vidc_timestamps *entry, *node, *prev = NULL;
	int count = 0;
	int rc = 0;
	struct v4l2_ctrl *superframe_ctrl = NULL;
	struct v4l2_ctrl *ctrl = NULL;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!inst->core->resources.enc_auto_dynamic_fps ||
		is_image_session(inst))
		return rc;

	/* set auto-framerate only for VBR CFR native recorder */
	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER);
	if ((ctrl && ctrl->val == V4L2_MPEG_MSM_VIDC_DISABLE) ||
		(inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR))
		return rc;

	mutex_lock(&inst->timestamps.lock);
	list_for_each_entry(node, &inst->timestamps.list, list) {
		count++;
		/* timestamps must be strictly increasing */
		if (timestamp_us <= node->timestamp_us) {
			s_vpr_e(inst->sid, "%s: invalid ts %llu, exist %llu\n",
				__func__, timestamp_us, node->timestamp_us);
			goto unlock;
		}
	}

	/* Maintain a sliding window: recycle the oldest entry when full */
	entry = NULL;
	if (count >= VENC_MAX_TIMESTAMP_LIST_SIZE) {
		entry = list_first_entry(&inst->timestamps.list,
			struct msm_vidc_timestamps, list);
		list_del_init(&entry->list);
	}
	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			s_vpr_e(inst->sid, "%s: ts malloc failure\n",
				__func__);
			rc = -ENOMEM;
			goto unlock;
		}
	}

	entry->timestamp_us = timestamp_us;
	entry->framerate = inst->clk_data.frame_rate;
	/*
	 * prev is read before the new entry is appended; it is only used
	 * below when count != 0, i.e. when the list was non-empty.
	 */
	prev = list_last_entry(&inst->timestamps.list,
		struct msm_vidc_timestamps, list);
	list_add_tail(&entry->list, &inst->timestamps.list);

	if (!count)
		goto unlock;

	entry->framerate = msm_comm_calc_framerate(inst,
		timestamp_us, prev->timestamp_us);

	/* if framerate changed and stable for 2 frames, set to firmware */
	if (entry->framerate == prev->framerate &&
		entry->framerate != inst->clk_data.frame_rate) {
		superframe_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_SUPERFRAME);
		if (superframe_ctrl->val > 1)
			inst->clk_data.frame_rate = entry->framerate * superframe_ctrl->val;
		else
			inst->clk_data.frame_rate = entry->framerate;
		s_vpr_l(inst->sid, "%s: updated fps to %u\n",
			__func__, (inst->clk_data.frame_rate >> 16));
		msm_venc_set_frame_rate(inst, false);
	}

unlock:
	mutex_unlock(&inst->timestamps.lock);
	return rc;
}

/*
 * msm_venc_set_color_format() - program the input-port pixel format and
 * any per-format plane constraints into firmware.
 */
int msm_venc_set_color_format(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_format_constraint
		*fmt_constraints;
	struct v4l2_format *f;

	f = &inst->fmts[INPUT_PORT].v4l2_fmt;
	rc = msm_comm_set_color_format(inst, HAL_BUFFER_INPUT,
		f->fmt.pix_mp.pixelformat);
	if (rc)
		return rc;

	/* Some pixel formats carry extra plane constraints; push them too */
	fmt_constraints = msm_comm_get_pixel_fmt_constraints(
		enc_pix_format_constraints,
		ARRAY_SIZE(enc_pix_format_constraints),
		f->fmt.pix_mp.pixelformat, inst->sid);
	if (fmt_constraints) {
		rc = msm_comm_set_color_format_constraints(inst,
			HAL_BUFFER_INPUT,
			fmt_constraints);
		if (rc) {
			s_vpr_e(inst->sid, "Set constraints for %d failed\n",
				f->fmt.pix_mp.pixelformat);
			return rc;
		}
	}

	return rc;
}

/*
 * msm_venc_set_buffer_counts() - program min/actual buffer counts for both
 * the input and output ports into firmware.
 */
int msm_venc_set_buffer_counts(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_format *fmt;
	enum hal_buffer buffer_type;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	buffer_type = HAL_BUFFER_INPUT;
	fmt = &inst->fmts[INPUT_PORT];
	rc = msm_comm_set_buffer_count(inst,
		fmt->count_min,
		fmt->count_actual,
		buffer_type);
	if (rc) {
		s_vpr_e(inst->sid, "%s: failed to set bufcounts(%#x)\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	buffer_type = HAL_BUFFER_OUTPUT;
	fmt = &inst->fmts[OUTPUT_PORT];
	rc = msm_comm_set_buffer_count(inst,
		fmt->count_min,
		fmt->count_actual,
		buffer_type);
	if (rc) {
		s_vpr_e(inst->sid, "%s: failed to set buf counts(%#x)\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	return rc;
}

/*
 * msm_venc_set_secure_mode() - enable/disable secure session in firmware.
 * Secure encode is only permitted for H264/HEVC bitstreams.
 */
int msm_venc_set_secure_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl;
	struct hfi_enable enable;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_SECURE);
	enable.enable = !!ctrl->val;

	if (enable.enable) {
		codec = get_v4l2_codec(inst);
		if (!(codec == V4L2_PIX_FMT_H264 ||
			codec == V4L2_PIX_FMT_HEVC)) {
			s_vpr_e(inst->sid,
				"%s: Secure mode only allowed for HEVC/H264\n",
				__func__);
			return -EINVAL;
		}
	}

	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_SECURE_SESSION, &enable, sizeof(enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_priority() - tell firmware whether this is a realtime
 * session (HFI_PROPERTY_CONFIG_REALTIME).
 */
int msm_venc_set_priority(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_enable enable;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	enable.enable = is_realtime_session(inst);

	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_REALTIME, &enable, sizeof(enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_operating_rate() - program the Q16 operating rate into
 * firmware (HFI_PROPERTY_CONFIG_OPERATING_RATE).
 */
int msm_venc_set_operating_rate(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_operating_rate op_rate;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}

	hdev = inst->core->device;
	op_rate.operating_rate = inst->clk_data.operating_rate;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, op_rate.operating_rate >> 16);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_OPERATING_RATE, &op_rate, sizeof(op_rate));
	if (rc) {
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);
		return rc;
	}

	return rc;
}

/*
 * msm_venc_set_profile_level() - program codec profile/level. Fails if the
 * client never set a profile (level alone is not enough).
 */
int msm_venc_set_profile_level(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_profile_level profile_level;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	if (!inst->profile) {
		s_vpr_e(inst->sid, "%s: skip as client did not set profile\n",
			__func__);
		return -EINVAL;
	}
	profile_level.profile = inst->profile;
	profile_level.level = inst->level;

	s_vpr_h(inst->sid, "%s: %#x %#x\n", __func__,
		profile_level.profile, profile_level.level);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT, &profile_level,
		sizeof(profile_level));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_idr_period() - set the IDR period to 1 for H264/HEVC.
 * No-op (returns 0) for other codecs.
 */
int msm_venc_set_idr_period(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_idr_period idr_period;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_H264 && codec != V4L2_PIX_FMT_HEVC)
		return 0;

	idr_period.idr_period = 1;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, idr_period.idr_period);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD, &idr_period,
		sizeof(idr_period));
	if (rc) {
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);
		return rc;
	}

	return rc;
}

/*
 * msm_venc_set_adaptive_bframes() - unconditionally enable adaptive
 * B-frames in firmware (HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B).
 */
int msm_venc_set_adaptive_bframes(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_enable enable;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	enable.enable = true;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B, &enable, sizeof(enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_adjust_gop_size() - when layer encoding is active, round the
 * GOP-size control to the nearest multiple of the sub-GOP size (rounding
 * half up; never below one sub-GOP).
 */
void msm_venc_adjust_gop_size(struct msm_vidc_inst *inst)
{
	struct v4l2_ctrl *hier_ctrl;
	struct v4l2_ctrl *gop_size_ctrl;
	s32 val;

	gop_size_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);

	/*
	 * Layer encoding needs GOP size to be multiple of subgop size
	 * And subgop size is 2 ^ number of enhancement layers
	 */
	hier_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	if (hier_ctrl->val > 1) {
		u32 min_gop_size;
		u32 num_subgops;

		min_gop_size = (1 << (hier_ctrl->val - 1));
		/* + (min_gop_size >> 1) rounds to the nearest multiple */
		num_subgops = (gop_size_ctrl->val + (min_gop_size >> 1)) /
				min_gop_size;
		if (num_subgops)
			val = num_subgops * min_gop_size;
		else
			val = min_gop_size;

		update_ctrl(gop_size_ctrl, val, inst->sid);
	}
}

/*
 * msm_venc_set_intra_period() - program GOP structure (P/B counts or
 * hier-B nframes) into firmware and, when B-frames are in play, enable
 * adaptive B.
 *
 * Hier-B is HEVC-only; hier-P forces the GOP to a sub-GOP multiple and
 * drops B-frames. A dynamic switch from IPPP to all-intra (both counts 0
 * after START_DONE) is silently refused.
 */
int msm_venc_set_intra_period(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *gop_size = NULL;
	struct v4l2_ctrl *bframes = NULL;
	struct v4l2_ctrl *max_layer = NULL;
	struct v4l2_ctrl *frame_t = NULL;
	struct hfi_intra_period intra_period = {
		.pframes = 0,
		.bframes = 0
	};
	struct hfi_adaptive_p_b_intra_period adaptive_p_b_intra_period = {
		.nframes = 0
	};
	u32 codec;
	bool adaptive_bframes = false;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	frame_t = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE);
	gop_size = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
	max_layer = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	codec = get_v4l2_codec(inst);

	intra_period.pframes = gop_size->val;

	/* max_layer 0/1 indicates absence of layer encoding */
	if (max_layer->val < 2) {
		/*
		 * At this point we've already made decision on bframe.
		 * Control value gives updated bframe value.
		 */
		bframes = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
		intra_period.bframes = bframes->val;
		if (intra_period.bframes)
			adaptive_bframes = true;
	}

	if (max_layer->val > 1) {
		if (frame_t->val ==
			V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B) {
			if (codec == V4L2_PIX_FMT_HEVC) {
				adaptive_p_b_intra_period.nframes =
					gop_size->val;
				adaptive_bframes = true;
			} else {
				d_vpr_e("%s: Hier-B supported for HEVC only\n",
					__func__);
				return -EINVAL;
			}
		} else if (frame_t->val ==
			V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P) {
			msm_venc_adjust_gop_size(inst);
			intra_period.pframes = gop_size->val;
			intra_period.bframes = 0;
			adaptive_bframes = false;
		}
	}

	if (inst->state == MSM_VIDC_START_DONE &&
		!intra_period.pframes && !intra_period.bframes) {
		s_vpr_h(inst->sid,
			"%s: Switch from IPPP to All Intra is not allowed\n",
			__func__);
		return rc;
	}

	/* hier-B uses the nframes payload; everything else uses p/b counts */
	if (frame_t->val ==
		V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B &&
		codec == V4L2_PIX_FMT_HEVC) {
		s_vpr_h(inst->sid, "%s: nframes: %d\n",
			__func__, adaptive_p_b_intra_period.nframes);
		rc = call_hfi_op(hdev, session_set_property, inst->session,
			HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD,
			&adaptive_p_b_intra_period,
			sizeof(adaptive_p_b_intra_period));

	} else {
		s_vpr_h(inst->sid, "%s: pframes: %d bframes: %d\n",
			__func__, intra_period.pframes,
			intra_period.bframes);
		rc = call_hfi_op(hdev, session_set_property, inst->session,
			HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD,
			&intra_period, sizeof(intra_period));
	}

	if (rc) {
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);
		return rc;
	}

	if (adaptive_bframes) {
		rc = msm_venc_set_adaptive_bframes(inst);
		if (rc) {
			s_vpr_e(inst->sid, "%s: set property failed\n",
				__func__);
			return rc;
		}
	}

	return rc;
}

/*
 * msm_venc_set_request_keyframe() - ask firmware to emit a sync frame
 * (IDR) on the next output buffer.
 */
int msm_venc_set_request_keyframe(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__,
inst); + return -EINVAL; + } + hdev = inst->core->device; + + s_vpr_h(inst->sid, "%s\n", __func__); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME, NULL, 0); + if (rc) { + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + return rc; + } + + return rc; +} + +int msm_venc_set_rate_control(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + u32 hfi_rc, codec; + u32 height, width, mbpf; + struct v4l2_format *f; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + hdev = inst->core->device; + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + codec = get_v4l2_codec(inst); + height = f->fmt.pix_mp.height; + width = f->fmt.pix_mp.width; + mbpf = NUM_MBS_PER_FRAME(height, width); + + if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR) + inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_MBR; + else if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && + inst->clk_data.low_latency_mode) + inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; + + if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR || + inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) + inst->clk_data.low_latency_mode = true; + + switch (inst->rc_type) { + case RATE_CONTROL_OFF: + case RATE_CONTROL_LOSSLESS: + hfi_rc = HFI_RATE_CONTROL_OFF; + break; + case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR: + hfi_rc = HFI_RATE_CONTROL_CBR_CFR; + break; + case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR: + hfi_rc = HFI_RATE_CONTROL_VBR_CFR; + break; + case V4L2_MPEG_VIDEO_BITRATE_MODE_MBR: + hfi_rc = HFI_RATE_CONTROL_MBR_CFR; + break; + case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR: + hfi_rc = HFI_RATE_CONTROL_CBR_VFR; + break; + case V4L2_MPEG_VIDEO_BITRATE_MODE_CQ: + hfi_rc = HFI_RATE_CONTROL_CQ; + break; + default: + hfi_rc = HFI_RATE_CONTROL_OFF; + s_vpr_e(inst->sid, + "Invalid Rate control setting: %d Default RCOFF\n", + inst->rc_type); + break; + } + s_vpr_h(inst->sid, "%s: %d\n", 
__func__, inst->rc_type); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VENC_RATE_CONTROL, &hfi_rc, + sizeof(u32)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + + + +int msm_venc_set_vbv_delay(struct msm_vidc_inst *inst) +{ + int rc = 0; + bool is_legacy_cbr; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + u32 codec, height, width, buf_size; + struct hfi_vbv_hrd_buf_size hrd_buf_size; + struct v4l2_format *f; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + hdev = inst->core->device; + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + codec = get_v4l2_codec(inst); + height = f->fmt.pix_mp.height; + width = f->fmt.pix_mp.width; + + /* vbv delay is required for CBR_CFR and CBR_VFR only */ + if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR && + inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) + return 0; + + /* Default behavior */ + is_legacy_cbr = false; + buf_size = CBR_PLUS_BUF_SIZE; + + /* + * Client can set vbv delay only when + * resolution is between VGA and 720p + */ + if (res_is_greater_than_or_equal_to(width, height, MIN_CBRPLUS_W, + MIN_CBRPLUS_H) && res_is_less_than_or_equal_to(width, height, + MAX_CBR_W, MAX_CBR_H)) { + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_VBV_DELAY); + if (ctrl->val == LEGACY_CBR_BUF_SIZE) { + is_legacy_cbr = true; + buf_size = LEGACY_CBR_BUF_SIZE; + goto set_vbv_delay; + } else if (ctrl->val == CBR_PLUS_BUF_SIZE) { + is_legacy_cbr = false; + buf_size = CBR_PLUS_BUF_SIZE; + goto set_vbv_delay; + } + } + + /* Enable legacy cbr if resolution < MIN_CBRPLUS (720p) */ + if (res_is_less_than(width, height, MAX_CBR_W, MAX_CBR_H)) { + is_legacy_cbr = true; + buf_size = LEGACY_CBR_BUF_SIZE; + goto set_vbv_delay; + } + +set_vbv_delay: + inst->clk_data.is_legacy_cbr = is_legacy_cbr; + hrd_buf_size.vbv_hrd_buf_size = buf_size; + s_vpr_h(inst->sid, "%s: %d\n", __func__, 
hrd_buf_size.vbv_hrd_buf_size); + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, + HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE, + (void *)&hrd_buf_size, sizeof(hrd_buf_size)); + if (rc) { + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + } + return rc; +} + + +int msm_venc_set_input_timestamp_rc(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_enable enable; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE); + /* + * HFI values: + * 0 - time delta is calculated based on buffer timestamp + * 1 - ignores buffer timestamp and fw derives time delta based + * on input frame rate. + */ + enable.enable = !!ctrl->val; + + s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP, &enable, + sizeof(enable)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_bitrate(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_bitrate bitrate; + struct hfi_enable enable; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + return 0; + + if (inst->layer_bitrate) { + s_vpr_h(inst->sid, "%s: Layer bitrate is enabled\n", __func__); + return 0; + } + + enable.enable = 0; + s_vpr_h(inst->sid, "%s: bitrate type: %d\n", + __func__, enable.enable); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE, &enable, + sizeof(enable)); + if (rc) { + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + return rc; + } + + bitrate.bit_rate = 
		inst->clk_data.bitrate;
	bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
	s_vpr_h(inst->sid, "%s: %d\n", __func__, bitrate.bit_rate);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE, &bitrate,
		sizeof(bitrate));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_layer_bitrate() - distribute the session bitrate across
 * hier layers using the client's cumulative L0..Ln ratio controls.
 *
 * The ratios are validated (top layer must be 100, all non-zero), then
 * converted in place from cumulative to per-layer deltas, and each
 * layer's absolute bitrate is programmed. On any failure (or invalid
 * ratios) inst->layer_bitrate is cleared and rc is returned; on success
 * it is set so msm_venc_set_bitrate() skips the single-bitrate path.
 */
int msm_venc_set_layer_bitrate(struct msm_vidc_inst *inst)
{
	int rc = 0, i = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *layer = NULL;
	struct v4l2_ctrl *max_layer = NULL;
	struct v4l2_ctrl *layer_br_ratios[MAX_HIER_CODING_LAYER] = {NULL};
	struct hfi_bitrate layer_br;
	struct hfi_enable enable;
	u32 bitrate;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	max_layer = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	layer = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);

	if (!max_layer->val || !layer->val) {
		s_vpr_h(inst->sid,
			"%s: Hier-P layer not set. Ignore layer bitrate\n",
			__func__);
		goto error;
	}

	if (max_layer->val < layer->val) {
		s_vpr_h(inst->sid,
			"%s: Hier-P layer greater than max isn't allowed\n",
			__func__);
		goto error;
	}

	layer_br_ratios[0] = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR);
	layer_br_ratios[1] = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR);
	layer_br_ratios[2] = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR);
	layer_br_ratios[3] = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR);
	layer_br_ratios[4] = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR);
	layer_br_ratios[5] = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR);

	/* Set layer bitrates only when highest layer br ratio is 100. */
	if (layer_br_ratios[layer->val-1]->val != MAX_BIT_RATE_RATIO ||
		layer_br_ratios[0]->val == 0) {
		s_vpr_h(inst->sid, "%s: Improper layer bitrate ratio\n",
			__func__);
		goto error;
	}

	/* Convert cumulative ratios to per-layer deltas (in place) */
	for (i = layer->val - 1; i > 0; --i) {
		if (layer_br_ratios[i]->val == 0) {
			s_vpr_h(inst->sid, "%s: Layer ratio must be non-zero\n",
				__func__);
			goto error;
		}
		layer_br_ratios[i]->val -= layer_br_ratios[i-1]->val;
	}

	/* bitrate type 1 = per-layer bitrates */
	enable.enable = 1;
	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE, &enable,
		sizeof(enable));
	if (rc) {
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);
		goto error;
	}

	bitrate = inst->clk_data.bitrate;
	for (i = 0; i < layer->val; ++i) {
		layer_br.bit_rate =
			bitrate * layer_br_ratios[i]->val / 100;
		layer_br.layer_id = i;
		s_vpr_h(inst->sid, "%s: Bitrate for Layer[%u]: [%u]\n",
			__func__, layer_br.layer_id, layer_br.bit_rate);

		rc = call_hfi_op(hdev, session_set_property, inst->session,
			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE, &layer_br,
			sizeof(layer_br));
		if (rc) {
			s_vpr_e(inst->sid,
				"%s: set property failed for layer: %u\n",
				__func__, layer_br.layer_id);
			goto error;
		}
	}

	inst->layer_bitrate = true;
	return rc;

error:
	inst->layer_bitrate = false;
	return rc;
}

/*
 * msm_venc_set_frame_qp() - program fixed I/P/B frame QPs.
 *
 * With rate control on, only client-set QP types are enabled (no-op if
 * none). With rate control off, the I-frame QP is mandatory and is
 * copied to any P/B QP the client did not set.
 */
int msm_venc_set_frame_qp(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *i_qp = NULL;
	struct v4l2_ctrl *p_qp = NULL;
	struct v4l2_ctrl *b_qp = NULL;
	struct hfi_quantization qp;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	qp.layer_id = MSM_VIDC_ALL_LAYER_ID;
	/* NOTE(review): the 0-assignment below is a dead store */
	qp.enable = 0;
	qp.enable = QP_ENABLE_I | QP_ENABLE_P | QP_ENABLE_B;

	i_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP);
	p_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP);
	b_qp =
get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP); + + /* + * When RC is ON: + * Enable QP types which have been set by client. + * When RC is OFF: + * I_QP value must be set by client. + * If other QP value is invalid, then, assign I_QP value to it. + */ + if (inst->rc_type != RATE_CONTROL_OFF) { + if (!(inst->client_set_ctrls & CLIENT_SET_I_QP)) + qp.enable &= ~QP_ENABLE_I; + if (!(inst->client_set_ctrls & CLIENT_SET_P_QP)) + qp.enable &= ~QP_ENABLE_P; + if (!(inst->client_set_ctrls & CLIENT_SET_B_QP)) + qp.enable &= ~QP_ENABLE_B; + + if (!qp.enable) + return 0; + } else { + if (!(inst->client_set_ctrls & CLIENT_SET_I_QP)) { + s_vpr_e(inst->sid, + "%s: Client value is not valid\n", __func__); + return -EINVAL; + } + if (!(inst->client_set_ctrls & CLIENT_SET_P_QP)) + p_qp->val = i_qp->val; + if (!(inst->client_set_ctrls & CLIENT_SET_B_QP)) + b_qp->val = i_qp->val; + } + + qp.qp_packed = i_qp->val | p_qp->val << 8 | b_qp->val << 16; + + s_vpr_h(inst->sid, "%s: layers %#x frames %#x qp_packed %#x\n", + __func__, qp.layer_id, qp.enable, qp.qp_packed); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_VENC_FRAME_QP, &qp, sizeof(qp)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_qp_range(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_quantization_range qp_range; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + if (!(inst->client_set_ctrls & CLIENT_SET_MIN_QP) && + !(inst->client_set_ctrls & CLIENT_SET_MAX_QP) && + !inst->boost_qp_enabled) { + s_vpr_h(inst->sid, + "%s: Client didn't set QP range\n", __func__); + return 0; + } + + qp_range.min_qp.layer_id = MSM_VIDC_ALL_LAYER_ID; + qp_range.max_qp.layer_id = MSM_VIDC_ALL_LAYER_ID; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP); + if (inst->boost_qp_enabled 
&& + !(inst->client_set_ctrls & CLIENT_SET_MIN_QP)) + qp_range.min_qp.qp_packed = inst->boost_min_qp; + else + qp_range.min_qp.qp_packed = ctrl->val; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP); + if (inst->boost_qp_enabled && + !(inst->client_set_ctrls & CLIENT_SET_MAX_QP)) + qp_range.max_qp.qp_packed = inst->boost_max_qp; + else + qp_range.max_qp.qp_packed = ctrl->val; + + s_vpr_h(inst->sid, "%s: layers %#x qp_min %#x qp_max %#x\n", + __func__, qp_range.min_qp.layer_id, + qp_range.min_qp.qp_packed, qp_range.max_qp.qp_packed); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE, &qp_range, + sizeof(qp_range)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +static void set_all_intra_preconditions(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl = NULL, *ctrl_t = NULL; + + /* Disable multi slice */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); + if (ctrl->val) { + d_vpr_h("Disable multi slice for all intra\n"); + update_ctrl(ctrl, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE, + inst->sid); + } + + /* Disable LTR */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT); + if (ctrl->val) { + s_vpr_h(inst->sid, "Disable LTR for all intra\n"); + update_ctrl(ctrl, 0, inst->sid); + } + + /* Disable Layer encoding */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER); + ctrl_t = get_ctrl(inst, + V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER); + if (ctrl->val || ctrl_t->val) { + s_vpr_h(inst->sid, "Disable layer encoding for all intra\n"); + update_ctrl(ctrl, 0, inst->sid); + update_ctrl(ctrl_t, 0, inst->sid); + } + + /* Disable IR */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM); + ctrl_t = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB); + if (ctrl->val || ctrl_t->val) { + s_vpr_h(inst->sid, "Disable IR for all intra\n"); + update_ctrl(ctrl, 0, inst->sid); + update_ctrl(ctrl_t, 
0, inst->sid); + } + + return; +} + +static void set_heif_preconditions(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl = NULL; + + /* Reset PFrames */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE); + if (ctrl->val) { + d_vpr_h("Reset P-frame count for HEIF\n"); + update_ctrl(ctrl, 0, inst->sid); + } + + /* Reset BFrames */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES); + if (ctrl->val) { + s_vpr_h(inst->sid, "Reset B-frame count for HEIF\n"); + update_ctrl(ctrl, 0, inst->sid); + } + + return; +} + +int msm_venc_set_frame_quality(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_heic_frame_quality frame_quality; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + return 0; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_COMPRESSION_QUALITY); + frame_quality.frame_quality = ctrl->val; + + s_vpr_h(inst->sid, "%s: frame quality: %d\n", __func__, + frame_quality.frame_quality); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY, &frame_quality, + sizeof(frame_quality)); + if (rc) + s_vpr_e(inst->sid, "%s: set frame quality failed\n", __func__); + + return rc; +} + +int msm_venc_set_image_grid(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_heic_grid_enable grid_enable; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + return 0; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_IMG_GRID_SIZE); + + /* Need a change in HFI if we want to pass size */ + if (!ctrl->val) + grid_enable.grid_enable = false; + else + grid_enable.grid_enable = true; + + s_vpr_h(inst->sid, "%s: grid enable: %d\n", 
		__func__,
		grid_enable.grid_enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_HEIC_GRID_ENABLE, &grid_enable,
		sizeof(grid_enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set grid enable failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_image_properties() - apply all image (HEIF/grid) encode
 * settings: frame quality and grid enable, then reset controls that
 * conflict with intra-only image encoding.
 *
 * No-op for non image/grid sessions; any rate-control mode other than CQ
 * is rejected with -EINVAL.
 */
int msm_venc_set_image_properties(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}

	if (!is_image_session(inst) && !is_grid_session(inst))
		return 0;

	if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) {
		d_vpr_e("%s: invalid rate control mode\n", __func__);
		return -EINVAL;
	}

	rc = msm_venc_set_frame_quality(inst);
	if (rc) {
		s_vpr_e(inst->sid,
			"%s: set image property failed\n", __func__);
		return rc;
	}

	rc = msm_venc_set_image_grid(inst);
	if (rc) {
		s_vpr_e(inst->sid,
			"%s: set image property failed\n", __func__);
		return rc;
	}

	set_all_intra_preconditions(inst);
	set_heif_preconditions(inst);
	return rc;
}

/*
 * msm_venc_set_entropy_mode() - program H.264 entropy coding mode.
 *
 * H.264 only; uses the cached inst->entropy_mode and a fixed CABAC model
 * (HFI_H264_CABAC_MODEL_2).
 */
int msm_venc_set_entropy_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct hfi_h264_entropy_control entropy;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	if (get_v4l2_codec(inst) != V4L2_PIX_FMT_H264)
		return 0;

	entropy.entropy_mode = inst->entropy_mode;
	entropy.cabac_model = HFI_H264_CABAC_MODEL_2;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, entropy.entropy_mode);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL, &entropy,
		sizeof(entropy));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_slice_control_mode() - program multi-slice control.
 *
 * Slice mode (off / by MB count / by byte count) and slice size are
 * derived from the MULTI_SLICE controls, then clamped against the
 * platform CAP_SLICE_MB / CAP_SLICE_BYTE capabilities. Slicing is only
 * attempted for H.264/HEVC at <= 60 fps with RC off, CBR or CBR_VFR,
 * and above minimum resolutions; otherwise slicing is left off.
 */
int msm_venc_set_slice_control_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl;
	struct v4l2_ctrl *ctrl_t;
	struct hfi_multi_slice_control multi_slice_control;
	struct v4l2_format *f;
	int temp = 0;	/* control id selected by the slice mode */
	u32 mb_per_frame, fps, mbps, bitrate, max_slices;
	u32 slice_val, slice_mode, max_avg_slicesize;
	u32 rc_mode, output_width, output_height;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}

	codec = get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_HEVC && codec != V4L2_PIX_FMT_H264)
		return 0;

	slice_mode = HFI_MULTI_SLICE_OFF;
	slice_val = 0;

	/* frame_rate is Q16; high fps or unsupported rc leaves slicing off */
	bitrate = inst->clk_data.bitrate;
	fps = inst->clk_data.frame_rate >> 16;
	rc_mode = inst->rc_type;
	if (fps > 60 || (!(rc_mode == RATE_CONTROL_OFF ||
		rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR ||
		rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))) {
		goto set_and_exit;
	}

	f = &inst->fmts[OUTPUT_PORT].v4l2_fmt;
	output_width = f->fmt.pix_mp.width;
	output_height = f->fmt.pix_mp.height;
	if ((codec == V4L2_PIX_FMT_HEVC) &&
		(output_height < 128 || output_width < 384))
		goto set_and_exit;

	if ((codec == V4L2_PIX_FMT_H264) &&
		(output_height < 128 || output_width < 192))
		goto set_and_exit;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
	if (ctrl->val == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
		temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB;
		slice_mode = HFI_MULTI_SLICE_BY_MB_COUNT;
	} else if (ctrl->val == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
		temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES;
		slice_mode = HFI_MULTI_SLICE_BY_BYTE_COUNT;
	} else {
		goto set_and_exit;
	}

	ctrl_t = get_ctrl(inst, temp);
	slice_val = ctrl_t->val;

	/* Update Slice Config */
	mb_per_frame = NUM_MBS_PER_FRAME(output_height, output_width);
	mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);

	if (slice_mode == HFI_MULTI_SLICE_BY_MB_COUNT) {
		if (output_width <= 4096 || output_height <= 4096 ||
			mb_per_frame <= NUM_MBS_PER_FRAME(4096, 2160) ||
			mbps <= NUM_MBS_PER_SEC(4096, 2160, 60)) {
			max_slices = inst->capability.cap[CAP_SLICE_MB].max ?
				inst->capability.cap[CAP_SLICE_MB].max : 1;
			slice_val = max(slice_val, mb_per_frame / max_slices);
		}
	} else {
		if (output_width <= 1920 || output_height <= 1920 ||
			mb_per_frame <= NUM_MBS_PER_FRAME(1088, 1920) ||
			mbps <= NUM_MBS_PER_SEC(1088, 1920, 60)) {
			max_slices = inst->capability.cap[CAP_SLICE_BYTE].max ?
				inst->capability.cap[CAP_SLICE_BYTE].max : 1;
			if (rc_mode != RATE_CONTROL_OFF) {
				/* floor of bytes per frame per slice */
				max_avg_slicesize =
					((bitrate / fps) / 8) / max_slices;
				slice_val = max(slice_val, max_avg_slicesize);
			}
		}
	}

	/*
	 * NOTE(review): on every non-goto path slice_mode was set to
	 * BY_MB_COUNT or BY_BYTE_COUNT above, so this branch looks
	 * unreachable - confirm before relying on it.
	 */
	if (slice_mode == HFI_MULTI_SLICE_OFF) {
		update_ctrl(ctrl, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
			inst->sid);
		update_ctrl(ctrl_t, 0, inst->sid);
	}

set_and_exit:
	multi_slice_control.multi_slice = slice_mode;
	multi_slice_control.slice_size = slice_val;

	hdev = inst->core->device;
	s_vpr_h(inst->sid, "%s: %d %d\n", __func__,
		multi_slice_control.multi_slice,
		multi_slice_control.slice_size);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL,
		&multi_slice_control, sizeof(multi_slice_control));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_intra_refresh_mode() - program random intra refresh.
 *
 * Only applies for CBR/CBR_VFR rate control. The MB count comes either
 * from the random intra-refresh control (frame MBs divided by the
 * control value, rounded up) or, when that is 0, from the cyclic
 * intra-refresh MB control. A zero MB count disables intra refresh.
 */
int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl = NULL;
	struct hfi_intra_refresh intra_refresh;
	struct v4l2_format *f;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	if (!(inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR ||
		inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
		return 0;

	/* Firmware supports only random mode */
	intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM);
	intra_refresh.mbs = 0;
	f = &inst->fmts[OUTPUT_PORT].v4l2_fmt;
	if (ctrl->val) {
		u32 num_mbs_per_frame = 0;
		u32 width = f->fmt.pix_mp.width;
		u32 height = f->fmt.pix_mp.height;

		num_mbs_per_frame = NUM_MBS_PER_FRAME(height, width);
		intra_refresh.mbs = num_mbs_per_frame / ctrl->val;
		/* round up so every MB is eventually refreshed */
		if (num_mbs_per_frame % ctrl->val) {
			intra_refresh.mbs++;
		}
	} else {
		ctrl = get_ctrl(inst,
			V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB);
		intra_refresh.mbs = ctrl->val;
	}
	if (!intra_refresh.mbs) {
		intra_refresh.mode = HFI_INTRA_REFRESH_NONE;
		intra_refresh.mbs = 0;
	}

	s_vpr_h(inst->sid, "%s: %d %d\n", __func__,
		intra_refresh.mode, intra_refresh.mbs);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH, &intra_refresh,
		sizeof(intra_refresh));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_chroma_qp_offset() - program HEVC chroma cb/cr QP offset.
 *
 * Only for HEVC with VBR rate control, and only when the client set the
 * control to 0 or -12 (INT_MAX means "not set"). Additionally gated on
 * bit depth: 10-bit requires BT2020 color space, 8-bit requires at
 * least 7680x3840 worth of macroblocks.
 */
int msm_venc_set_chroma_qp_offset(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *chr;
	struct v4l2_ctrl *ctrl_cs;
	struct hfi_chroma_qp_offset chroma_qp;
	struct v4l2_format *f;
	u32 codec, width, height, mbpf;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	chr = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET);
	if (chr->val == INT_MAX || (chr->val != 0 && chr->val != -12))
		return 0;

	f = &inst->fmts[INPUT_PORT].v4l2_fmt;
	width = f->fmt.pix_mp.width;
	height = f->fmt.pix_mp.height;
	mbpf = NUM_MBS_PER_FRAME(width, height);
	ctrl_cs = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
	codec = get_v4l2_codec(inst);

	/**
	 * Set chroma qp offset to HEVC & VBR_CFR rc
	 * 10 bit: only BT2020
	 * 8 bit: only mbpf >= num_mbs(7680, 3840)
	 */
	if (codec != V4L2_PIX_FMT_HEVC ||
		inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
		return 0;

	if ((inst->bit_depth == MSM_VIDC_BIT_DEPTH_10 &&
		ctrl_cs->val != MSM_VIDC_BT2020) ||
		(inst->bit_depth == MSM_VIDC_BIT_DEPTH_8 &&
		mbpf < NUM_MBS_PER_FRAME(7680, 3840)))
		return
			0;

	/**
	 * client sets one chroma offset only in range [-12, 0]
	 * firmware expects chroma cb offset and cr offset in
	 * range [0, 12], firmware subtracts 12 from driver set values.
	 */
	chroma_qp.chroma_offset = (chr->val + 12) << 16 | (chr->val + 12);
	s_vpr_h(inst->sid, "%s: %x\n", __func__, chroma_qp.chroma_offset);

	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_HEVC_PPS_CB_CR_OFFSET, &chroma_qp,
		sizeof(chroma_qp));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_bitrate_savings_mode() - enable/disable CAC bitrate
 * savings, then chain into bitrate boost margin configuration.
 *
 * Savings are only considered for VBR rate control; the enable bit is
 * taken from the BITRATE_SAVINGS control, selecting the 10-bit flag for
 * HEVC Main-10 and the 8-bit flag otherwise.
 */
int msm_venc_set_bitrate_savings_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *cac = NULL;
	struct v4l2_ctrl *profile = NULL;
	struct hfi_enable enable;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	enable.enable = 0;
	if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
		s_vpr_h(inst->sid,
			"Disable bitrate savings for non-VBR_CFR\n");
		goto setprop;
	}

	codec = get_v4l2_codec(inst);
	profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE);
	cac = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS);

	if (codec == V4L2_PIX_FMT_HEVC &&
		profile->val == V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)
		enable.enable = !!(cac->val & V4L2_MPEG_VIDC_VIDEO_BRS_ENABLE_10BIT);
	else
		enable.enable = !!(cac->val & V4L2_MPEG_VIDC_VIDEO_BRS_ENABLE_8BIT);

setprop:
	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS, &enable,
		sizeof(enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);
	else
		rc = msm_venc_set_bitrate_boost_margin(inst, enable.enable);

	return rc;
}

/*
 * msm_venc_set_bitrate_boost_margin() - program the bitrate boost
 * margin and cache the optional boost QP range.
 * @enable: non-zero when bitrate savings were enabled; 0 forces a zero
 *          margin.
 *
 * The control value is quantized down to 0, 15, 25 or 50. When boost is
 * enabled and the QPRANGE_BOOST control is set, the packed min/max QP
 * (low byte = min, next byte = max) is replicated per plane into
 * inst->boost_min_qp / inst->boost_max_qp.
 */
int msm_venc_set_bitrate_boost_margin(struct msm_vidc_inst *inst, u32 enable)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl = NULL;
	struct hfi_bitrate_boost_margin boost_margin;
	int minqp, maxqp;
	uint32_t vpu;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;
	vpu = inst->core->platform_data->vpu_ver;

	if (!enable) {
		boost_margin.margin = 0;
		goto setprop;
	}

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_BITRATE_BOOST);

	/*
	 * For certain SOC, default value should be 0 unless client enabled
	 */
	if (!inst->boost_enabled && vpu == VPU_VERSION_AR50_LITE) {
		ctrl->val = 0;
		update_ctrl(ctrl, 0, inst->sid);
	}
	/* Mapped value to 0, 15, 25 or 50*/
	if (ctrl->val >= 50)
		boost_margin.margin = 50;
	else if (ctrl->val >= 25)
		boost_margin.margin = (u32)(ctrl->val/25) * 25;
	else
		boost_margin.margin = (u32)(ctrl->val/15) * 15;

setprop:
	s_vpr_h(inst->sid, "%s: %d\n", __func__, boost_margin.margin);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_BITRATE_BOOST, &boost_margin,
		sizeof(boost_margin));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	/* Boost QP range is only enabled when bitrate boost is enabled
	 * and boost QP range is set by client
	 */
	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_QPRANGE_BOOST);
	if (enable && ctrl->val) {
		minqp = ctrl->val & 0xFF;
		maxqp = (ctrl->val >> 8) & 0xFF;
		inst->boost_qp_enabled = true;
		inst->boost_min_qp = minqp | (minqp << 8) | (minqp << 16);
		inst->boost_max_qp = maxqp | (maxqp << 8) | (maxqp << 16);
	}

	return rc;
}


/*
 * msm_venc_set_loop_filter_mode() - program the H.264/HEVC deblocking
 * filter mode and alpha/beta offsets from the LOOP_FILTER controls.
 */
int msm_venc_set_loop_filter_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl;
	struct v4l2_ctrl *ctrl_a;
	struct v4l2_ctrl *ctrl_b;
	struct hfi_h264_db_control h264_db_control;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec =
		get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_H264 && codec != V4L2_PIX_FMT_HEVC)
		return 0;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE);
	ctrl_a = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA);
	ctrl_b = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA);
	h264_db_control.mode = msm_comm_v4l2_to_hfi(
		V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
		ctrl->val, inst->sid);
	h264_db_control.slice_alpha_offset = ctrl_a->val;
	h264_db_control.slice_beta_offset = ctrl_b->val;

	s_vpr_h(inst->sid, "%s: %d %d %d\n", __func__,
		h264_db_control.mode, h264_db_control.slice_alpha_offset,
		h264_db_control.slice_beta_offset);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL, &h264_db_control,
		sizeof(h264_db_control));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_sequence_header_mode() - ask firmware to prepend SPS/PPS
 * to every sync (IDR) frame, per the PREPEND_SPSPPS_TO_IDR control.
 * H.264/HEVC only.
 */
int msm_venc_set_sequence_header_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl;
	struct hfi_enable enable;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (!(codec == V4L2_PIX_FMT_H264 || codec == V4L2_PIX_FMT_HEVC))
		return 0;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR);
	if (ctrl->val)
		enable.enable = true;
	else
		enable.enable = false;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER, &enable,
		sizeof(enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_au_delimiter_mode() - enable/disable AU delimiter NAL
 * generation (H.264/HEVC) from the AU_DELIMITER control.
 */
int msm_venc_set_au_delimiter_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl;
	struct hfi_enable enable;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (!(codec == V4L2_PIX_FMT_H264 || codec == V4L2_PIX_FMT_HEVC))
		return 0;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER);
	enable.enable = !!ctrl->val;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL, &enable,
		sizeof(enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_enable_hybrid_hp() - decide whether hybrid hierarchical-P
 * applies and latch the decision into inst->hybrid_hp.
 *
 * Returns 0 (decision only); -EINVAL on bad params.
 */
int msm_venc_enable_hybrid_hp(struct msm_vidc_inst *inst)
{
	struct v4l2_ctrl *ctrl = NULL;
	struct v4l2_ctrl *layer = NULL;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}

	if (get_v4l2_codec(inst) != V4L2_PIX_FMT_H264)
		return 0;

	if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
		return 0;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT);
	if (ctrl->val)
		return 0;

	ctrl = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	layer = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
	if (ctrl->val == 0 || ctrl->val != layer->val)
		return 0;

	/*
	 * Hybrid HP is enabled only for H264 when
	 * LTR and B-frame are both disabled,
	 * Layer encoding has higher priority over B-frame
	 * Hence, no need to check for B-frame
	 * Rate control type is VBR and
	 * Max layer equals layer count.
	 */

	inst->hybrid_hp = true;

	return 0;
}

/*
 * msm_venc_set_base_layer_priority_id() - program the base layer
 * priority id; only meaningful when Hier-P layering is configured
 * (max layer control > 0).
 */
int msm_venc_set_base_layer_priority_id(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl = NULL;
	struct v4l2_ctrl *max_layer = NULL;
	u32 baselayerid;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	max_layer = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	if (max_layer->val <= 0) {
		s_vpr_h(inst->sid, "%s: Layer id can only be set with Hier-P\n",
			__func__);
		return 0;
	}

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID);
	baselayerid = ctrl->val;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, baselayerid);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID, &baselayerid,
		sizeof(baselayerid));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_hb_max_layer() - program the Hier-B max enhancement
 * layer count (HEVC only). Requires max layer >= 2 and hierarchical
 * coding type B; firmware takes enhancement layers, hence max - 1.
 */
int msm_venc_set_hb_max_layer(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *frame_t = NULL;
	struct v4l2_ctrl *max_layer = NULL;
	u32 hb_layer = 0;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_HEVC)
		return 0;

	max_layer = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	frame_t = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE);
	if (max_layer->val < 2 ||
		frame_t->val != V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B) {
		s_vpr_h(inst->sid,
			"%s: Hier-B not requested for this session\n",
			__func__);
		return 0;
	}
	hb_layer = max_layer->val - 1;

	s_vpr_h(inst->sid, "%s: Hier-B max layer: %d\n",
		__func__, hb_layer);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER,
		&hb_layer, sizeof(hb_layer));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_hp_max_layer() - program the Hier-P max enhancement
 * layer count (H.264/HEVC), choosing the hybrid or non-hybrid HFI
 * property based on the msm_venc_enable_hybrid_hp() decision.
 */
int msm_venc_set_hp_max_layer(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *frame_t = NULL;
	struct v4l2_ctrl *max_layer = NULL;
	u32 hp_layer = 0;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_H264 && codec != V4L2_PIX_FMT_HEVC)
		return 0;

	max_layer = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	frame_t = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE);
	if (max_layer->val < 2 ||
		frame_t->val != V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P) {
		s_vpr_h(inst->sid,
			"%s: Hier-P not requested for this session\n",
			__func__);
		return 0;
	}

	rc = msm_venc_enable_hybrid_hp(inst);
	if (rc) {
		s_vpr_e(inst->sid, "%s: get hybrid hier-P decision failed\n",
			__func__);
		return rc;
	}
	if (!inst->hybrid_hp && max_layer->val > 4) {
		update_ctrl(max_layer, 0, inst->sid);
		s_vpr_h(inst->sid,
			"%s: Hier-P requested beyond max capability\n", __func__);
		return 0;
	}

	/*
	 * We send enhancement layer count to FW,
	 * hence, input 0/1 indicates absence of layer encoding.
	 */
	if (max_layer->val)
		hp_layer = max_layer->val - 1;

	if (inst->hybrid_hp) {
		s_vpr_h(inst->sid, "%s: Hybrid hier-P layer: %d\n",
			__func__, hp_layer);
		rc = call_hfi_op(hdev, session_set_property, inst->session,
			HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE,
			&hp_layer, sizeof(hp_layer));
	} else {
		s_vpr_h(inst->sid, "%s: Hier-P max layer: %d\n",
			__func__, hp_layer);
		rc = call_hfi_op(hdev, session_set_property, inst->session,
			HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER,
			&hp_layer, sizeof(hp_layer));
	}
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);
	return rc;
}

/*
 * msm_venc_set_hp_layer() - program the current Hier-P enhancement
 * layer count (H.264/HEVC). Rejected (as a no-op) for hybrid HP or
 * when the requested layer exceeds the configured max layer.
 */
int msm_venc_set_hp_layer(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *frame_t = NULL;
	struct v4l2_ctrl *ctrl = NULL;
	struct v4l2_ctrl *max_layer = NULL;
	u32 hp_layer = 0;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	frame_t = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE);
	if (frame_t->val != V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P) {
		s_vpr_h(inst->sid,
			"%s: Hier-P layer can be set for P type frame only\n",
			__func__);
		return 0;
	}

	codec = get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_H264 && codec != V4L2_PIX_FMT_HEVC)
		return 0;

	if (inst->hybrid_hp) {
		s_vpr_e(inst->sid,
			"%s: Setting layer isn't allowed with hybrid hp\n",
			__func__);
		return 0;
	}

	max_layer = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
	ctrl = get_ctrl(inst,
		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
	s_vpr_h(inst->sid, "%s: heir_layer: %d, max_hier_layer: %d\n",
		__func__, ctrl->val, max_layer->val);
	if (max_layer->val < ctrl->val) {
		s_vpr_e(inst->sid,
			"%s: HP layer count greater than max isn't allowed\n",
			__func__);
		return 0;
	}

	/*
	 * We send enhancement layer count to FW,
	 * hence, input 0/1 indicates absence of layer encoding.
	 */
	if (ctrl->val)
		hp_layer = ctrl->val - 1;

	s_vpr_h(inst->sid, "%s: Hier-P enhancement layer: %d\n",
		__func__, hp_layer);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER,
		&hp_layer, sizeof(hp_layer));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_video_signal_info() - program VUI video signal metadata
 * (color primaries, transfer characteristics, matrix coeffs, full
 * range) for H.264/HEVC. Disabled entirely only when both full range
 * is unspecified and the color space control is the reserved value.
 */
int msm_venc_set_video_signal_info(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl_cs;
	struct v4l2_ctrl *ctrl_fr;
	struct v4l2_ctrl *ctrl_tr;
	struct v4l2_ctrl *ctrl_mc;
	struct hfi_video_signal_metadata signal_info;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (!(codec == V4L2_PIX_FMT_H264 || codec == V4L2_PIX_FMT_HEVC))
		return 0;

	ctrl_cs = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
	ctrl_fr = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
	ctrl_tr = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
	ctrl_mc = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);

	memset(&signal_info, 0, sizeof(struct hfi_video_signal_metadata));
	if (inst->full_range == COLOR_RANGE_UNSPECIFIED &&
		ctrl_cs->val == MSM_VIDC_RESERVED_1)
		signal_info.enable = false;
	else
		signal_info.enable = true;

	if (signal_info.enable) {
		signal_info.video_format = MSM_VIDC_NTSC;
		signal_info.video_full_range = ctrl_fr->val;
		if (ctrl_cs->val != MSM_VIDC_RESERVED_1) {
			signal_info.color_description = 1;
			signal_info.color_primaries = ctrl_cs->val;
			signal_info.transfer_characteristics = ctrl_tr->val;
			signal_info.matrix_coeffs = ctrl_mc->val;
		}
	}

	s_vpr_h(inst->sid, "%s: %d %d %d %d\n", __func__,
		signal_info.color_primaries, signal_info.video_full_range,
		signal_info.transfer_characteristics,
		signal_info.matrix_coeffs);
	rc = call_hfi_op(hdev, session_set_property,
inst->session, + HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO, &signal_info, + sizeof(signal_info)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_rotation(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct v4l2_ctrl *rotation = NULL; + struct hfi_device *hdev; + struct hfi_vpe_rotation_type vpe_rotation; + + hdev = inst->core->device; + rotation = get_ctrl(inst, V4L2_CID_ROTATE); + + vpe_rotation.rotation = HFI_ROTATE_NONE; + if (rotation->val == 90) + vpe_rotation.rotation = HFI_ROTATE_90; + else if (rotation->val == 180) + vpe_rotation.rotation = HFI_ROTATE_180; + else if (rotation->val == 270) + vpe_rotation.rotation = HFI_ROTATE_270; + + vpe_rotation.flip = v4l2_to_hfi_flip(inst); + + s_vpr_h(inst->sid, "Set rotation = %d, flip = %d\n", + vpe_rotation.rotation, vpe_rotation.flip); + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, + HFI_PROPERTY_PARAM_VPE_ROTATION, + &vpe_rotation, sizeof(vpe_rotation)); + if (rc) { + s_vpr_e(inst->sid, "Set rotation/flip failed\n"); + return rc; + } + + /* Mark static rotation/flip set */ + inst->static_rotation_flip_enabled = false; + if ((vpe_rotation.rotation != HFI_ROTATE_NONE || + vpe_rotation.flip != HFI_FLIP_NONE) && + inst->state < MSM_VIDC_START_DONE) + inst->static_rotation_flip_enabled = true; + + return rc; +} + +int msm_venc_check_dynamic_flip_constraints(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct v4l2_ctrl *blur = NULL; + struct v4l2_format *f = NULL; + bool scalar_enable = false; + bool blur_enable = false; + u32 input_height, input_width; + + /* Dynamic flip is not allowed with scalar when static + * rotation/flip is disabled + */ + scalar_enable = vidc_scalar_enabled(inst); + + /* Check blur configs + * blur value = 0 -> enable auto blur + * blur value = 2 or input resolution -> disable all blur + * For other values -> enable external blur + * Dynamic flip is not allowed with external blur enabled + */ + f = 
&inst->fmts[INPUT_PORT].v4l2_fmt; + input_height = f->fmt.pix_mp.height; + input_width = f->fmt.pix_mp.width; + + blur = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BLUR_DIMENSIONS); + if (blur->val != 0 && blur->val != 2 && + ((blur->val & 0xFFFF) != input_height || + (blur->val & 0x7FFF0000) >> 16 != input_width)) + blur_enable = true; + s_vpr_h(inst->sid, "Blur = %u, height = %u, width = %u\n", + blur->val, input_height, input_width); + if (blur_enable) { + /* Reject dynamic flip with external blur enabled */ + s_vpr_e(inst->sid, + "Unsupported dynamic flip with external blur\n"); + rc = -EINVAL; + } else if (scalar_enable && !inst->static_rotation_flip_enabled) { + /* Reject dynamic flip with scalar enabled */ + s_vpr_e(inst->sid, "Unsupported dynamic flip with scalar\n"); + rc = -EINVAL; + } else if (handle_vpss_restrictions(inst)) { + s_vpr_e(inst->sid, "Unsupported resolution for dynamic flip\n"); + rc = -EINVAL; + } + + return rc; +} + +int msm_venc_set_dynamic_flip(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + u32 dynamic_flip; + + hdev = inst->core->device; + + rc = msm_venc_check_dynamic_flip_constraints(inst); + if (rc) { + d_vpr_e("%s: Dynamic flip unsupported\n", __func__); + return rc; + } + + /* Require IDR frame first */ + s_vpr_h(inst->sid, "Set dynamic IDR frame\n"); + rc = msm_venc_set_request_keyframe(inst); + if (rc) { + s_vpr_e(inst->sid, "%s: Dynamic IDR failed\n", __func__); + return rc; + } + + dynamic_flip = v4l2_to_hfi_flip(inst); + s_vpr_h(inst->sid, "Dynamic flip = %d\n", dynamic_flip); + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, + HFI_PROPERTY_CONFIG_VPE_FLIP, + &dynamic_flip, sizeof(dynamic_flip)); + if (rc) { + s_vpr_e(inst->sid, "Set dynamic flip failed\n"); + return rc; + } + + return rc; +} + +int msm_venc_set_video_csc(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl *ctrl_cs; + struct v4l2_ctrl *ctrl_cm; + u32 
		color_primaries, custom_matrix;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	if (get_v4l2_codec(inst) != V4L2_PIX_FMT_H264 &&
		get_v4l2_codec(inst) != V4L2_PIX_FMT_HEVC)
		return 0;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC);
	if (ctrl->val == V4L2_MPEG_MSM_VIDC_DISABLE)
		return 0;

	ctrl_cs = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
	ctrl_cm = get_ctrl(inst,
		V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_CUSTOM_MATRIX);

	color_primaries = ctrl_cs->val;
	custom_matrix = ctrl_cm->val;
	rc = msm_venc_set_csc(inst, color_primaries, custom_matrix);
	if (rc)
		s_vpr_e(inst->sid, "%s: msm_venc_set_csc failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_8x8_transform() - enable/disable the H.264 8x8 transform.
 * Only for H.264 High / Constrained High profiles.
 */
int msm_venc_set_8x8_transform(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl = NULL;
	struct hfi_enable enable;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	if (get_v4l2_codec(inst) != V4L2_PIX_FMT_H264) {
		s_vpr_h(inst->sid, "%s: skip as codec is not H264\n",
			__func__);
		return 0;
	}

	if (inst->profile != HFI_H264_PROFILE_HIGH &&
		inst->profile != HFI_H264_PROFILE_CONSTRAINED_HIGH) {
		s_vpr_h(inst->sid, "%s: skip due to %#x\n",
			__func__, inst->profile);
		return 0;
	}

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM);
	enable.enable = !!ctrl->val;

	s_vpr_h(inst->sid, "%s: %d\n", __func__, enable.enable);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_H264_8X8_TRANSFORM, &enable,
		sizeof(enable));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_vui_timing_info() - program VUI timing info (H.264/HEVC).
 *
 * Sent when the VUI timing control is enabled or the session is a
 * native recorder. fixed_frame_rate is true for VBR/CBR/MBR rc modes;
 * time_scale is derived from the Q16 frame rate.
 */
int msm_venc_set_vui_timing_info(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl;
	struct hfi_vui_timing_info timing_info;
	bool cfr, native_recorder;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_H264 && codec != V4L2_PIX_FMT_HEVC)
		return 0;

	native_recorder = false;
	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER);
	if (ctrl->val == V4L2_MPEG_MSM_VIDC_ENABLE)
		native_recorder = true;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_VUI_TIMING_INFO);
	if (ctrl->val == V4L2_MPEG_MSM_VIDC_DISABLE && native_recorder == false)
		return 0;

	switch (inst->rc_type) {
	case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
	case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
	case V4L2_MPEG_VIDEO_BITRATE_MODE_MBR:
		cfr = true;
		break;
	default:
		cfr = false;
		break;
	}

	timing_info.enable = 1;
	timing_info.fixed_frame_rate = cfr;
	timing_info.time_scale = (inst->clk_data.frame_rate >> 16) * USEC_PER_SEC;

	s_vpr_h(inst->sid, "%s: %d %d\n", __func__, timing_info.enable,
		timing_info.fixed_frame_rate);
	rc = call_hfi_op(hdev, session_set_property, inst->session,
		HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO, &timing_info,
		sizeof(timing_info));
	if (rc)
		s_vpr_e(inst->sid, "%s: set property failed\n", __func__);

	return rc;
}

/*
 * msm_venc_set_nal_stream_format() - select NAL stream format
 * (startcodes vs 4-byte length prefix) from the HEVC size-of-length
 * control. Secure sessions only allow startcode-based output.
 */
int msm_venc_set_nal_stream_format(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct hfi_device *hdev;
	struct v4l2_ctrl *ctrl;
	struct hfi_nal_stream_format_select stream_format;
	u32 codec;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	hdev = inst->core->device;

	codec = get_v4l2_codec(inst);
	if (codec != V4L2_PIX_FMT_H264 && codec != V4L2_PIX_FMT_HEVC)
		return 0;

	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD);
	stream_format.nal_stream_format_select = BIT(ctrl->val);

	/*
	 * Secure encode session supports 0x00000001 startcode based
	 * encoding only
	 */
	if (is_secure_session(inst) &&
		ctrl->val !=
V4L2_MPEG_VIDEO_HEVC_SIZE_0) { + s_vpr_e(inst->sid, + "%s: Invalid stream format setting for secure session\n", + __func__); + return -EINVAL; + } + + switch (ctrl->val) { + case V4L2_MPEG_VIDEO_HEVC_SIZE_0: + stream_format.nal_stream_format_select = + HFI_NAL_FORMAT_STARTCODES; + break; + case V4L2_MPEG_VIDEO_HEVC_SIZE_4: + stream_format.nal_stream_format_select = + HFI_NAL_FORMAT_FOUR_BYTE_LENGTH; + break; + default: + s_vpr_e(inst->sid, + "%s: Invalid stream format setting. Setting default\n", + __func__); + stream_format.nal_stream_format_select = + HFI_NAL_FORMAT_STARTCODES; + break; + } + + s_vpr_h(inst->sid, "%s: %#x\n", __func__, + stream_format.nal_stream_format_select); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT, &stream_format, + sizeof(stream_format)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_ltr_mode(struct msm_vidc_inst *inst) +{ + int rc = 0; + bool is_ltr = true; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_ltr_mode ltr; + u32 codec; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT); + if (!ctrl->val) + return 0; + + codec = get_v4l2_codec(inst); + if (!(codec == V4L2_PIX_FMT_HEVC || codec == V4L2_PIX_FMT_H264)) { + is_ltr = false; + goto disable_ltr; + } + + if (!(inst->rc_type == RATE_CONTROL_OFF || + inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR || + inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR)) { + is_ltr = false; + goto disable_ltr; + } + + if (ctrl->val > inst->capability.cap[CAP_LTR_COUNT].max) { + s_vpr_e(inst->sid, "%s: invalid ltr count %d, max %d\n", + __func__, ctrl->val, + inst->capability.cap[CAP_LTR_COUNT].max); + return -EINVAL; + } + ltr.ltr_count = ctrl->val; + ltr.ltr_mode = HFI_LTR_MODE_MANUAL; + ltr.trust_mode = 1; + 
s_vpr_h(inst->sid, "%s: %d %d\n", __func__, + ltr.ltr_mode, ltr.ltr_count); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VENC_LTRMODE, <r, sizeof(ltr)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + +disable_ltr: + /* + * Forcefully setting LTR count to zero when + * client sets unsupported codec/rate control. + */ + if (!is_ltr) { + update_ctrl(ctrl, 0, inst->sid); + s_vpr_h(inst->sid, "LTR is forcefully disabled!\n"); + } + return rc; +} + +int msm_venc_set_ltr_useframe(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_ltr_use use_ltr; + u32 codec; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT); + if (!ctrl->val) + return 0; + + codec = get_v4l2_codec(inst); + if (!(codec == V4L2_PIX_FMT_HEVC || codec == V4L2_PIX_FMT_H264)) + return 0; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME); + use_ltr.ref_ltr = ctrl->val; + use_ltr.use_constrnt = true; + use_ltr.frames = 0; + s_vpr_h(inst->sid, "%s: %d\n", __func__, use_ltr.ref_ltr); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_VENC_USELTRFRAME, &use_ltr, + sizeof(use_ltr)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_ltr_markframe(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_ltr_mark mark_ltr; + u32 codec; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT); + if (!ctrl->val) + return 0; + + codec = get_v4l2_codec(inst); + if (!(codec == V4L2_PIX_FMT_HEVC || codec == V4L2_PIX_FMT_H264)) + return 0; + + ctrl = 
get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME); + mark_ltr.mark_frame = ctrl->val; + + s_vpr_h(inst->sid, "%s: %d\n", __func__, mark_ltr.mark_frame); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME, &mark_ltr, + sizeof(mark_ltr)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_dyn_qp(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_quantization qp; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + if (inst->rc_type != RATE_CONTROL_OFF) { + s_vpr_e(inst->sid, "%s: Dyn qp is set only when RC is OFF\n", + __func__); + return -EINVAL; + } + + qp.qp_packed = ctrl->val | ctrl->val << 8 | ctrl->val << 16; + qp.enable = QP_ENABLE_I | QP_ENABLE_P | QP_ENABLE_B; + qp.layer_id = MSM_VIDC_ALL_LAYER_ID; + + s_vpr_h(inst->sid, "%s: %#x\n", __func__, + ctrl->val); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_VENC_FRAME_QP, &qp, sizeof(qp)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_aspect_ratio(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl; + struct hfi_aspect_ratio sar; + u32 codec; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + codec = get_v4l2_codec(inst); + if (!(codec == V4L2_PIX_FMT_H264 || codec == V4L2_PIX_FMT_HEVC)) + return 0; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH); + if (!ctrl->val) + return 0; + sar.aspect_width = ctrl->val; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT); + if (!ctrl->val) + return 0; + sar.aspect_height = ctrl->val; + + s_vpr_h(inst->sid, "%s: %d %d\n", __func__, + 
sar.aspect_width, sar.aspect_height); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO, &sar, sizeof(sar)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_blur_resolution(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct v4l2_ctrl *ctrl = NULL; + struct hfi_frame_size frame_sz; + struct v4l2_format *f; + bool disable_blur = false; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BLUR_DIMENSIONS); + + frame_sz.buffer_type = HFI_BUFFER_INPUT; + frame_sz.height = ctrl->val & 0xFFFF; + frame_sz.width = (ctrl->val & 0x7FFF0000) >> 16; + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + + /* + * 0x0 is default value, internal blur enabled, external blur disabled + * 0x1 means dynamic external blur, blur resolution will be set + * after start, internal blur disabled + * 0x2 means disable both internal and external blur + */ + if (ctrl->val == MSM_VIDC_BLUR_DISABLE) { + s_vpr_h(inst->sid, + "Disable internal/external blur\n"); + disable_blur = true; + } else if (ctrl->val == MSM_VIDC_BLUR_INTERNAL) { + if (check_blur_restrictions(inst)) { + s_vpr_h(inst->sid, + "Internal blur restrictions not met. 
Disabling blur..\n"); + disable_blur = true; + } + } else { + if (check_blur_restrictions(inst)) { + s_vpr_e(inst->sid, + "External blur is unsupported with rotation/flip/scalar\n"); + disable_blur = true; + } else if (frame_sz.width > f->fmt.pix_mp.width || + frame_sz.height > f->fmt.pix_mp.height) { + s_vpr_e(inst->sid, + "external blur wxh[%ux%u] exceeds input wxh[%ux%u]\n", + frame_sz.width, frame_sz.height, + f->fmt.pix_mp.width, f->fmt.pix_mp.height); + disable_blur = true; + } + if (inst->state < MSM_VIDC_START_DONE && disable_blur) + inst->external_blur = false; + } + + if (disable_blur) { + /* + * Use original input width/height (before VPSS) to inform FW + * to disable all blur. + */ + frame_sz.width = f->fmt.pix_mp.width; + frame_sz.height = f->fmt.pix_mp.height; + } + + s_vpr_h(inst->sid, "%s: type %u, height %u, width %u\n", __func__, + frame_sz.buffer_type, frame_sz.height, frame_sz.width); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE, &frame_sz, + sizeof(frame_sz)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + + return rc; +} + +int msm_venc_set_hdr_info(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct v4l2_ctrl *profile = NULL; + struct hfi_device *hdev; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + + if (get_v4l2_codec(inst) != V4L2_PIX_FMT_HEVC || + !inst->hdr10_sei_enabled) + return 0; + + profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE); + if (profile->val != V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) + return 0; + + /* No conversion to HFI needed as both structures are same */ + s_vpr_h(inst->sid, "%s: setting hdr info\n", __func__); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI, &inst->hdr10_sei_params, + sizeof(inst->hdr10_sei_params)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", 
__func__); + + return rc; +} + +int msm_venc_set_extradata(struct msm_vidc_inst *inst) +{ + int rc = 0; + u32 codec; + + codec = get_v4l2_codec(inst); + if (inst->prop.extradata_ctrls == EXTRADATA_NONE) { + // Disable all Extradata + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_LTR_INFO, 0x0); + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA, 0x0); + if (codec == V4L2_PIX_FMT_HEVC) { + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_HDR10PLUS_METADATA_EXTRADATA, + 0x0); + } + } + + if (inst->prop.extradata_ctrls & EXTRADATA_ADVANCED) + // Enable Advanced Extradata - LTR Info + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_LTR_INFO, 0x1); + + if (inst->prop.extradata_ctrls & EXTRADATA_ENC_FRAME_QP) + // Enable AvgQP Extradata + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_FRAME_QP_EXTRADATA, 0x1); + + if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_ROI) + // Enable ROIQP Extradata + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA, 0x1); + + if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_HDR10PLUS) { + // Enable HDR10+ Extradata + if (codec == V4L2_PIX_FMT_HEVC) { + msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_HDR10PLUS_METADATA_EXTRADATA, + 0x1); + } + } + + if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_CROP) { + // Enable Input Crop Extradata + msm_comm_set_index_extradata(inst, MSM_VIDC_EXTRADATA_INPUT_CROP, 0x1); + s_vpr_l(inst->sid, "%s: enable input crop encoding\n", __func__); + } + + if(!msm_vidc_cvp_usage) + inst->prop.extradata_ctrls &= ~EXTRADATA_ENC_INPUT_CVP; + + /* CVP extradata is common between user space and external CVP kernel to kernel. 
+ Hence, skipping here and will be set after msm_vidc_prepare_preprocess in start_streaming*/ + + return rc; +} + +int msm_venc_set_lossless(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_enable enable; + + hdev = inst->core->device; + + if (inst->rc_type != RATE_CONTROL_LOSSLESS) + return 0; + + s_vpr_h(inst->sid, "%s: enable lossless encoding\n", __func__); + enable.enable = 1; + rc = call_hfi_op(hdev, session_set_property, + inst->session, + HFI_PROPERTY_PARAM_VENC_LOSSLESS_ENCODING, + &enable, sizeof(enable)); + + if (rc) + s_vpr_e(inst->sid, "Failed to set lossless mode\n"); + + return rc; +} +int msm_venc_set_cvp_skipratio(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct v4l2_ctrl *capture_rate_ctrl; + struct v4l2_ctrl *cvp_rate_ctrl; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + if (!is_cvp_supported(inst)) { + s_vpr_h(inst->sid, "%s cvp is not supported", __func__); + return rc; + } + + if (!msm_vidc_cvp_usage) + return 0; + + capture_rate_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_CAPTURE_FRAME_RATE); + cvp_rate_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_CVP_FRAME_RATE); + + rc = msm_comm_set_cvp_skip_ratio(inst, + capture_rate_ctrl->val, cvp_rate_ctrl->val); + if (rc) + s_vpr_e(inst->sid, "Failed to set cvp skip ratio\n"); + + return rc; +} + +int msm_venc_update_entropy_mode(struct msm_vidc_inst *inst) +{ + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + if (get_v4l2_codec(inst) == V4L2_PIX_FMT_H264) { + if ((inst->profile == HFI_H264_PROFILE_BASELINE || + inst->profile == HFI_H264_PROFILE_CONSTRAINED_BASE) + && inst->entropy_mode == HFI_H264_ENTROPY_CABAC) { + inst->entropy_mode = HFI_H264_ENTROPY_CAVLC; + s_vpr_h(inst->sid, + "%s: profile %d entropy %d\n", + __func__, inst->profile, + inst->entropy_mode); + } + } + + return 0; +} + +int handle_all_intra_restrictions(struct msm_vidc_inst *inst) +{ + struct 
v4l2_ctrl *ctrl = NULL; + u32 n_fps, fps_max; + struct msm_vidc_capability *capability; + struct v4l2_format *f; + enum hal_video_codec codec; + struct hfi_intra_period intra_period; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + return 0; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE); + intra_period.pframes = ctrl->val; + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES); + intra_period.bframes = ctrl->val; + + if (!intra_period.pframes && !intra_period.bframes) + inst->all_intra = true; + else + return 0; + + s_vpr_h(inst->sid, "All Intra(IDRs) Encoding\n"); + /* check codec and profile */ + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + codec = get_hal_codec(f->fmt.pix_mp.pixelformat, inst->sid); + if (codec != HAL_VIDEO_CODEC_HEVC && codec != HAL_VIDEO_CODEC_H264) { + s_vpr_e(inst->sid, "Unsupported codec for all intra\n"); + return -ENOTSUPP; + } + if (codec == HAL_VIDEO_CODEC_HEVC && + inst->profile == HFI_HEVC_PROFILE_MAIN10) { + s_vpr_e(inst->sid, "Unsupported HEVC profile for all intra\n"); + return -ENOTSUPP; + } + + /* CBR_CFR is one of the advertised rc mode for HEVC encoding. + * However, all-intra is intended for quality bitstream. Hence, + * fallback to VBR RC mode if client needs all-intra encoding. 
+ */ + if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) + inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR; + + /* check supported bit rate mode and frame rate */ + capability = &inst->capability; + n_fps = inst->clk_data.frame_rate >> 16; + fps_max = capability->cap[CAP_ALLINTRA_MAX_FPS].max; + s_vpr_h(inst->sid, "%s: rc_type %u, fps %u, fps_max %u\n", + __func__, inst->rc_type, n_fps, fps_max); + if ((inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && + inst->rc_type != RATE_CONTROL_OFF && + inst->rc_type != RATE_CONTROL_LOSSLESS) || + n_fps > fps_max) { + s_vpr_e(inst->sid, "Unsupported bitrate mode or frame rate\n"); + return -ENOTSUPP; + } + + set_all_intra_preconditions(inst); + + return 0; +} + +int check_blur_restrictions(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *cac = NULL; + struct v4l2_ctrl *profile = NULL; + struct v4l2_ctrl *blur = NULL; + struct v4l2_format *f; + bool scalar_enable = false; + bool sup_resolution = false; + bool sup_codec = false; + bool is_10_bit = false; + u32 input_height, input_width; + u32 codec; + + /* Only need to check static VPSS conditions */ + if (inst->state == MSM_VIDC_START_DONE) + return 0; + + scalar_enable = vidc_scalar_enabled(inst); + blur = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BLUR_DIMENSIONS); + /* Minimum restrictions to enable any type of blur */ + if (scalar_enable || inst->static_rotation_flip_enabled) { + return -ENOTSUPP; + } + if (blur->val != MSM_VIDC_BLUR_INTERNAL) { + /* below restrictions applicable for internal blur only */ + return 0; + } + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + input_height = f->fmt.pix_mp.height; + input_width = f->fmt.pix_mp.width; + + /* Adaptive blur restrictions */ + cac = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS); + codec = get_v4l2_codec(inst); + profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE); + + if (codec == V4L2_PIX_FMT_HEVC && + profile->val == V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) { + is_10_bit = true; + } + if 
(res_is_greater_than(input_width, input_height, 352, 240) && + res_is_less_than_or_equal_to(input_width, input_height, + 3840, 2160)) { + sup_resolution = true; + } + + if (codec == V4L2_PIX_FMT_HEVC || codec == V4L2_PIX_FMT_H264) + sup_codec = true; + + if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR || + !cac->val || is_10_bit || !sup_codec || inst->all_intra || + !sup_resolution) { + return -ENOTSUPP; + } + + return 0; +} + +int handle_vpss_restrictions(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *rotation = NULL; + struct v4l2_ctrl *hflip = NULL; + struct v4l2_ctrl *vflip = NULL; + struct v4l2_format *f; + struct msm_vidc_vpss_capability *vpss_caps; + u32 vpss_caps_count; + bool rotation_flip_enable = false; + u32 i,input_height, input_width; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + input_height = f->fmt.pix_mp.height; + input_width = f->fmt.pix_mp.width; + + vpss_caps = inst->core->resources.vpss_caps; + vpss_caps_count = inst->core->resources.vpss_caps_count; + + /* check customer specified VPSS resolutions */ + if (vpss_caps) { + for (i = 0; i < vpss_caps_count; i++) { + if (input_width == vpss_caps[i].width && + input_height == vpss_caps[i].height) { + s_vpr_h(inst->sid, + "supported resolution found for VPSS, width = %d, height = %d\n", + input_width, input_height); + return 0; + } + } + } + + /* check rotation and flip contraint for VPSS + * any rotation or flip sessions with non-multiple of 8 + * resolution is rejected. 
+ */ + rotation = get_ctrl(inst, V4L2_CID_ROTATE); + hflip = get_ctrl(inst, V4L2_CID_HFLIP); + vflip = get_ctrl(inst, V4L2_CID_VFLIP); + if (rotation->val != 0 || + hflip->val != V4L2_MPEG_MSM_VIDC_DISABLE || + vflip->val != V4L2_MPEG_MSM_VIDC_DISABLE) + rotation_flip_enable = true; + + if (rotation_flip_enable) { + if ((input_width & 7) != 0) { + s_vpr_e(inst->sid, "Unsupported width = %d for VPSS\n", + input_width); + return -ENOTSUPP; + } + if ((input_height & 7) != 0) { + s_vpr_e(inst->sid, "Unsupported height = %d for VPSS\n", + input_height); + return -ENOTSUPP; + } + } + return 0; +} + +int msm_venc_set_properties(struct msm_vidc_inst *inst) +{ + int rc = 0; + + rc = msm_venc_update_entropy_mode(inst); + if (rc) + goto exit; + rc = msm_venc_update_bitrate(inst); + if (rc) + goto exit; + rc = handle_all_intra_restrictions(inst); + if (rc) + goto exit; + rc = handle_vpss_restrictions(inst); + if (rc) + goto exit; + rc = msm_venc_set_frame_size(inst); + if (rc) + goto exit; + rc = msm_venc_set_frame_rate(inst, true); + if (rc) + goto exit; + rc = msm_venc_set_secure_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_priority(inst); + if (rc) + goto exit; + rc = msm_venc_set_color_format(inst); + if (rc) + goto exit; + rc = msm_venc_set_sequence_header_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_profile_level(inst); + if (rc) + goto exit; + rc = msm_venc_set_8x8_transform(inst); + if (rc) + goto exit; + rc = msm_venc_set_entropy_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_rate_control(inst); + if (rc) + goto exit; + rc = msm_venc_set_vbv_delay(inst); + if (rc) + goto exit; + rc = msm_venc_set_bitrate_savings_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_input_timestamp_rc(inst); + if (rc) + goto exit; + rc = msm_venc_set_frame_qp(inst); + if (rc) + goto exit; + rc = msm_venc_set_qp_range(inst); + if (rc) + goto exit; + rc = msm_venc_set_image_properties(inst); + if (rc) + goto exit; + rc = msm_venc_set_au_delimiter_mode(inst); 
+ if (rc) + goto exit; + rc = msm_venc_set_vui_timing_info(inst); + if (rc) + goto exit; + rc = msm_venc_set_hdr_info(inst); + if (rc) + goto exit; + rc = msm_venc_set_nal_stream_format(inst); + if (rc) + goto exit; + rc = msm_venc_set_slice_control_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_loop_filter_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_intra_refresh_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_ltr_mode(inst); + if (rc) + goto exit; + rc = msm_venc_set_hb_max_layer(inst); + if (rc) + goto exit; + rc = msm_venc_set_hp_max_layer(inst); + if (rc) + goto exit; + rc = msm_venc_set_hp_layer(inst); + if (rc) + goto exit; + rc = msm_venc_set_base_layer_priority_id(inst); + if (rc) + goto exit; + rc = msm_venc_set_idr_period(inst); + if (rc) + goto exit; + rc = msm_venc_set_intra_period(inst); + if (rc) + goto exit; + rc = msm_venc_set_aspect_ratio(inst); + if (rc) + goto exit; + rc = msm_venc_set_video_signal_info(inst); + if (rc) + goto exit; + /* + * Layer bitrate is preferred over cumulative bitrate. + * Cumulative bitrate is set only when we fall back. + */ + rc = msm_venc_set_layer_bitrate(inst); + if (rc) + goto exit; + rc = msm_venc_set_bitrate(inst); + if (rc) + goto exit; + rc = msm_venc_set_video_csc(inst); + if (rc) + goto exit; + /* + * Downscalar and Static rotation/flip has higher priority + * than blur. 
+ */ + rc = msm_venc_set_rotation(inst); + if (rc) + goto exit; + rc = msm_venc_set_chroma_qp_offset(inst); + if (rc) + goto exit; + rc = msm_venc_set_blur_resolution(inst); + if (rc) + goto exit; + rc = msm_venc_set_extradata(inst); + if (rc) + goto exit; + rc = msm_venc_set_operating_rate(inst); + if (rc) + goto exit; + rc = msm_venc_set_buffer_counts(inst); + if (rc) + goto exit; + rc = msm_venc_set_lossless(inst); + if (rc) + goto exit; + +exit: + if (rc) + s_vpr_e(inst->sid, "%s: failed with %d\n", __func__, rc); + else + s_vpr_h(inst->sid, "%s: set properties successful\n", __func__); + + return rc; +} diff --git a/techpack/video/msm/vidc/msm_venc.h b/techpack/video/msm/vidc/msm_venc.h new file mode 100644 index 000000000000..ac3f38a829ff --- /dev/null +++ b/techpack/video/msm/vidc/msm_venc.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ +#ifndef _MSM_VENC_H_ +#define _MSM_VENC_H_ + +#include "msm_vidc.h" +#include "msm_vidc_internal.h" +#define MSM_VENC_DVC_NAME "msm_vidc_venc" + +int msm_venc_inst_init(struct msm_vidc_inst *inst); +int msm_venc_ctrl_init(struct msm_vidc_inst *inst, + const struct v4l2_ctrl_ops *ctrl_ops); +int msm_venc_enum_fmt(struct msm_vidc_inst *inst, + struct v4l2_fmtdesc *f); +int msm_venc_s_fmt(struct msm_vidc_inst *inst, + struct v4l2_format *f); +int msm_venc_g_fmt(struct msm_vidc_inst *inst, + struct v4l2_format *f); +int msm_venc_set_default_profile(struct msm_vidc_inst *inst); +int msm_venc_s_ctrl(struct msm_vidc_inst *inst, + struct v4l2_ctrl *ctrl); +int msm_venc_set_properties(struct msm_vidc_inst *inst); +int msm_venc_set_extradata(struct msm_vidc_inst *inst); +int msm_venc_set_frame_rate(struct msm_vidc_inst *inst, bool external_requested); +int msm_venc_store_timestamp(struct msm_vidc_inst *inst, u64 timestamp_us); +int msm_venc_set_bitrate(struct msm_vidc_inst *inst); +int msm_venc_set_layer_bitrate(struct msm_vidc_inst 
*inst); +int msm_venc_set_operating_rate(struct msm_vidc_inst *inst); +int msm_venc_set_idr_period(struct msm_vidc_inst *inst); +int msm_venc_set_intra_period(struct msm_vidc_inst *inst); +int msm_venc_set_ltr_useframe(struct msm_vidc_inst *inst); +int msm_venc_set_ltr_markframe(struct msm_vidc_inst *inst); +int msm_venc_set_dyn_qp(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl); +int msm_venc_set_request_keyframe(struct msm_vidc_inst *inst); +int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst); +int msm_venc_set_hb_max_layer(struct msm_vidc_inst *inst); +int msm_venc_set_hp_max_layer(struct msm_vidc_inst *inst); +int msm_venc_set_hp_layer(struct msm_vidc_inst *inst); +int msm_venc_set_base_layer_priority_id(struct msm_vidc_inst *inst); +int msm_venc_check_dynamic_flip_constraints(struct msm_vidc_inst *inst); +int msm_venc_set_dynamic_flip(struct msm_vidc_inst *inst); +int msm_venc_set_lossless(struct msm_vidc_inst *inst); +int msm_venc_set_blur_resolution(struct msm_vidc_inst *inst); +int msm_venc_set_cvp_skipratio(struct msm_vidc_inst *inst); +int handle_all_intra_restrictions(struct msm_vidc_inst *inst); +int check_blur_restrictions(struct msm_vidc_inst *inst); +int msm_venc_set_frame_quality(struct msm_vidc_inst *inst); +int msm_venc_set_image_grid(struct msm_vidc_inst *inst); +int msm_venc_set_bitrate_boost_margin(struct msm_vidc_inst *inst, u32 enable); +int handle_vpss_restrictions(struct msm_vidc_inst *inst); +#endif diff --git a/techpack/video/msm/vidc/msm_vidc.c b/techpack/video/msm/vidc/msm_vidc.c new file mode 100644 index 000000000000..d848eb08976b --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc.c @@ -0,0 +1,1843 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include "msm_vidc.h" +#include "msm_vidc_internal.h" +#include "msm_vidc_debug.h" +#include "msm_vdec.h" +#include "msm_venc.h" +#include "msm_vidc_common.h" +#include "vidc_hfi.h" +#include "vidc_hfi_helper.h" +#include "vidc_hfi_api.h" +#include "msm_vidc_clocks.h" +#include "msm_vidc_buffer_calculations.h" + +#define MAX_EVENTS 30 + +static int try_get_ctrl_for_instance(struct msm_vidc_inst *inst, + struct v4l2_ctrl *ctrl); + +static int get_poll_flags(void *instance) +{ + struct msm_vidc_inst *inst = instance; + struct vb2_queue *outq = &inst->bufq[INPUT_PORT].vb2_bufq; + struct vb2_queue *capq = &inst->bufq[OUTPUT_PORT].vb2_bufq; + struct vb2_buffer *out_vb = NULL; + struct vb2_buffer *cap_vb = NULL; + unsigned long flags = 0; + int rc = 0; + + if (v4l2_event_pending(&inst->event_handler)) + rc |= POLLPRI; + + spin_lock_irqsave(&capq->done_lock, flags); + if (!list_empty(&capq->done_list)) + cap_vb = list_first_entry(&capq->done_list, struct vb2_buffer, + done_entry); + if (cap_vb && (cap_vb->state == VB2_BUF_STATE_DONE + || cap_vb->state == VB2_BUF_STATE_ERROR)) + rc |= POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&capq->done_lock, flags); + + spin_lock_irqsave(&outq->done_lock, flags); + if (!list_empty(&outq->done_list)) + out_vb = list_first_entry(&outq->done_list, struct vb2_buffer, + done_entry); + if (out_vb && (out_vb->state == VB2_BUF_STATE_DONE + || out_vb->state == VB2_BUF_STATE_ERROR)) + rc |= POLLOUT | POLLWRNORM; + spin_unlock_irqrestore(&outq->done_lock, flags); + + return rc; +} + +int msm_vidc_poll(void *instance, struct file *filp, + struct poll_table_struct *wait) +{ + struct msm_vidc_inst *inst = instance; + struct vb2_queue *outq = NULL; + struct vb2_queue *capq = NULL; + + if (!inst) + return -EINVAL; + + outq = &inst->bufq[INPUT_PORT].vb2_bufq; + capq = &inst->bufq[OUTPUT_PORT].vb2_bufq; + + poll_wait(filp, &inst->event_handler.wait, wait); + poll_wait(filp, &capq->done_wq, wait); + poll_wait(filp, &outq->done_wq, wait); + 
return get_poll_flags(inst); +} +EXPORT_SYMBOL(msm_vidc_poll); + +int msm_vidc_querycap(void *instance, struct v4l2_capability *cap) +{ + struct msm_vidc_inst *inst = instance; + + if (!inst || !cap) + return -EINVAL; + + strlcpy(cap->driver, MSM_VIDC_DRV_NAME, sizeof(cap->driver)); + cap->bus_info[0] = 0; + cap->version = MSM_VIDC_VERSION; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + + memset(cap->reserved, 0, sizeof(cap->reserved)); + + if (inst->session_type == MSM_VIDC_DECODER) + strlcpy(cap->card, MSM_VDEC_DVC_NAME, sizeof(cap->card)); + else if (inst->session_type == MSM_VIDC_ENCODER) + strlcpy(cap->card, MSM_VENC_DVC_NAME, sizeof(cap->card)); + else + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(msm_vidc_querycap); + +int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f) +{ + struct msm_vidc_inst *inst = instance; + + if (!inst || !f) + return -EINVAL; + + if (inst->session_type == MSM_VIDC_DECODER) + return msm_vdec_enum_fmt(instance, f); + else if (inst->session_type == MSM_VIDC_ENCODER) + return msm_venc_enum_fmt(instance, f); + return -EINVAL; +} +EXPORT_SYMBOL(msm_vidc_enum_fmt); + +int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *q_ctrl) +{ + int rc = 0; + struct msm_vidc_inst *inst = instance; + struct v4l2_ctrl *ctrl; + + if (!inst || !q_ctrl) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, q_ctrl); + return -EINVAL; + } + + ctrl = v4l2_ctrl_find(&inst->ctrl_handler, q_ctrl->id); + if (!ctrl) { + s_vpr_e(inst->sid, "%s: get_ctrl failed for id %d\n", + __func__, q_ctrl->id); + return -EINVAL; + } + q_ctrl->minimum = ctrl->minimum; + q_ctrl->maximum = ctrl->maximum; + q_ctrl->default_value = ctrl->default_value; + /* remove tier info for HEVC level */ + if (q_ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_LEVEL) { + q_ctrl->minimum &= ~(0xF << 28); + q_ctrl->maximum &= ~(0xF << 28); + } + if 
(ctrl->type == V4L2_CTRL_TYPE_MENU) { + q_ctrl->flags = ~(ctrl->menu_skip_mask); + } else { + q_ctrl->flags = 0; + q_ctrl->step = ctrl->step; + } + s_vpr_h(inst->sid, + "query ctrl: %s: min %d, max %d, default %d step %d flags %#x\n", + ctrl->name, q_ctrl->minimum, q_ctrl->maximum, + q_ctrl->default_value, q_ctrl->step, q_ctrl->flags); + return rc; +} +EXPORT_SYMBOL(msm_vidc_query_ctrl); + +int msm_vidc_query_menu(void *instance, struct v4l2_querymenu *qmenu) +{ + int rc = 0; + struct msm_vidc_inst *inst = instance; + struct v4l2_ctrl *ctrl; + + if (!inst || !qmenu) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, qmenu); + return -EINVAL; + } + + ctrl = v4l2_ctrl_find(&inst->ctrl_handler, qmenu->id); + if (!ctrl) { + s_vpr_e(inst->sid, "%s: get_ctrl failed for id %d\n", + __func__, qmenu->id); + return -EINVAL; + } + if (ctrl->type != V4L2_CTRL_TYPE_MENU) { + s_vpr_e(inst->sid, "%s: ctrl: %s: type (%d) is not MENU type\n", + __func__, ctrl->name, ctrl->type); + return -EINVAL; + } + if (qmenu->index < ctrl->minimum || qmenu->index > ctrl->maximum) + return -EINVAL; + + if (ctrl->menu_skip_mask & (1 << qmenu->index)) + rc = -EINVAL; + + s_vpr_h(inst->sid, + "%s: ctrl: %s: min %d, max %d, menu_skip_mask %#x, qmenu: id %d, index %d, %s\n", + __func__, ctrl->name, ctrl->minimum, ctrl->maximum, + ctrl->menu_skip_mask, qmenu->id, qmenu->index, + rc ? 
"not supported" : "supported"); + return rc; +} +EXPORT_SYMBOL(msm_vidc_query_menu); + +int msm_vidc_s_fmt(void *instance, struct v4l2_format *f) +{ + int rc = 0; + struct msm_vidc_inst *inst = instance; + + if (!inst || !f) + return -EINVAL; + + if (inst->session_type == MSM_VIDC_DECODER) + rc = msm_vdec_s_fmt(instance, f); + if (inst->session_type == MSM_VIDC_ENCODER) + rc = msm_venc_s_fmt(instance, f); + + s_vpr_h(inst->sid, + "s_fmt: type %d wxh %dx%d pixelfmt %#x num_planes %d size[0] %d size[1] %d in_reconfig %d\n", + f->type, f->fmt.pix_mp.width, f->fmt.pix_mp.height, + f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.num_planes, + f->fmt.pix_mp.plane_fmt[0].sizeimage, + f->fmt.pix_mp.plane_fmt[1].sizeimage, inst->in_reconfig); + return rc; +} +EXPORT_SYMBOL(msm_vidc_s_fmt); + +int msm_vidc_g_fmt(void *instance, struct v4l2_format *f) +{ + int rc = 0; + struct msm_vidc_inst *inst = instance; + + if (!inst || !f) + return -EINVAL; + + if (inst->session_type == MSM_VIDC_DECODER) + rc = msm_vdec_g_fmt(instance, f); + if (inst->session_type == MSM_VIDC_ENCODER) + rc = msm_venc_g_fmt(instance, f); + + s_vpr_h(inst->sid, + "g_fmt: type %d wxh %dx%d pixelfmt %#x num_planes %d size[0] %d size[1] %d in_reconfig %d\n", + f->type, f->fmt.pix_mp.width, f->fmt.pix_mp.height, + f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.num_planes, + f->fmt.pix_mp.plane_fmt[0].sizeimage, + f->fmt.pix_mp.plane_fmt[1].sizeimage, inst->in_reconfig); + return rc; +} +EXPORT_SYMBOL(msm_vidc_g_fmt); + +int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control) +{ + struct msm_vidc_inst *inst = instance; + + if (!inst || !control) + return -EINVAL; + + return msm_comm_s_ctrl(instance, control); +} +EXPORT_SYMBOL(msm_vidc_s_ctrl); + +int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control) +{ + struct msm_vidc_inst *inst = instance; + struct v4l2_ctrl *ctrl = NULL; + int rc = 0; + + if (!inst || !control) + return -EINVAL; + + ctrl = v4l2_ctrl_find(&inst->ctrl_handler, control->id); + if 
(ctrl) { + rc = try_get_ctrl_for_instance(inst, ctrl); + if (!rc) + control->value = ctrl->val; + } + + return rc; +} +EXPORT_SYMBOL(msm_vidc_g_ctrl); + +int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b) +{ + struct msm_vidc_inst *inst = instance; + struct buf_queue *q = NULL; + int rc = 0; + + if (!inst || !b) + return -EINVAL; + q = msm_comm_get_vb2q(inst, b->type); + if (!q) { + s_vpr_e(inst->sid, + "Failed to find buffer queue. type %d\n", b->type); + return -EINVAL; + } + + mutex_lock(&q->lock); + rc = vb2_reqbufs(&q->vb2_bufq, b); + mutex_unlock(&q->lock); + + if (rc) + s_vpr_e(inst->sid, "Failed to get reqbufs, %d\n", rc); + return rc; +} +EXPORT_SYMBOL(msm_vidc_reqbufs); + +static bool valid_v4l2_buffer(struct v4l2_buffer *b, + struct msm_vidc_inst *inst) +{ + struct v4l2_format *f; + enum vidc_ports port = + !V4L2_TYPE_IS_MULTIPLANAR(b->type) ? MAX_PORT_NUM : + b->type == OUTPUT_MPLANE ? OUTPUT_PORT : + b->type == INPUT_MPLANE ? INPUT_PORT : + MAX_PORT_NUM; + + f = &inst->fmts[port].v4l2_fmt; + return port != MAX_PORT_NUM && + f->fmt.pix_mp.num_planes == b->length; +} + +int msm_vidc_release_buffer(void *instance, int type, unsigned int index) +{ + int rc = 0; + struct msm_vidc_inst *inst = instance; + struct msm_vidc_buffer *mbuf, *dummy; + + if (!inst) { + d_vpr_e("%s: invalid inst\n", __func__); + return -EINVAL; + } + + if (!inst->in_reconfig && + inst->state > MSM_VIDC_LOAD_RESOURCES && + inst->state < MSM_VIDC_RELEASE_RESOURCES_DONE) { + rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); + if (rc) { + s_vpr_e(inst->sid, + "%s: Failed to move inst: %pK to rel res done\n", + __func__, inst); + } + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(mbuf, dummy, &inst->registeredbufs.list, + list) { + struct vb2_buffer *vb2 = &mbuf->vvb.vb2_buf; + + if (vb2->type != type || vb2->index != index) + continue; + + if (mbuf->flags & MSM_VIDC_FLAG_RBR_PENDING) { + print_vidc_buffer(VIDC_HIGH, + "skip rel buf 
(rbr pending)", inst, mbuf); + continue; + } + + print_vidc_buffer(VIDC_HIGH, "release buf", inst, mbuf); + msm_comm_unmap_vidc_buffer(inst, mbuf); + list_del(&mbuf->list); + kref_put_mbuf(mbuf); + } + mutex_unlock(&inst->registeredbufs.lock); + + return rc; +} +EXPORT_SYMBOL(msm_vidc_release_buffer); + +int msm_vidc_qbuf(void *instance, struct media_device *mdev, + struct v4l2_buffer *b) +{ + struct msm_vidc_inst *inst = instance; + int rc = 0; + unsigned int i = 0; + struct buf_queue *q = NULL; + s64 timestamp_us = 0; + u32 cr = 0; + + if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) { + d_vpr_e("%s: invalid params %pK %pK\n", __func__, inst, b); + return -EINVAL; + } + + if (!IS_ALIGNED(b->m.planes[0].length, SZ_4K)) { + s_vpr_e(inst->sid, "qbuf: buffer size not 4K aligned - %u\n", + b->m.planes[0].length); + return -EINVAL; + } + + q = msm_comm_get_vb2q(inst, b->type); + if (!q) { + s_vpr_e(inst->sid, + "Failed to find buffer queue. type %d\n", b->type); + return -EINVAL; + } + + mutex_lock(&q->lock); + if ((inst->out_flush && b->type == OUTPUT_MPLANE) || inst->in_flush) { + s_vpr_e(inst->sid, + "%s: in flush, discarding qbuf, type %u, index %u\n", + __func__, b->type, b->index); + rc = -EINVAL; + goto unlock; + } + inst->last_qbuf_time_ns = ktime_get_ns(); + + for (i = 0; i < b->length; i++) { + b->m.planes[i].m.fd = + b->m.planes[i].reserved[MSM_VIDC_BUFFER_FD]; + b->m.planes[i].data_offset = + b->m.planes[i].reserved[MSM_VIDC_DATA_OFFSET]; + } + + /* Compression ratio is valid only for Encoder YUV buffers. 
*/ + if (inst->session_type == MSM_VIDC_ENCODER && + b->type == INPUT_MPLANE) { + cr = b->m.planes[0].reserved[MSM_VIDC_COMP_RATIO]; + msm_comm_update_input_cr(inst, b->index, cr); + } + + if (b->type == INPUT_MPLANE) { + rc = msm_comm_store_input_tag(&inst->etb_data, b->index, + b->m.planes[0].reserved[MSM_VIDC_INPUT_TAG_1], + 0, inst->sid); + if (rc) { + s_vpr_e(inst->sid, "Failed to store input tag"); + rc = -EINVAL; + goto unlock; + } + } + + /* + * set perf mode for image and thumbnail session buffers + * so that they will be processed quickly + */ + if ((is_grid_session(inst) || is_thumbnail_session(inst)) + && b->type == INPUT_MPLANE) + b->flags |= V4L2_BUF_FLAG_PERF_MODE; + + timestamp_us = (s64)((b->timestamp.tv_sec * 1000000) + + b->timestamp.tv_usec); + if (is_decode_session(inst) && b->type == INPUT_MPLANE && + is_ts_reorder_allowed(inst)) { + if (inst->flush_timestamps) + msm_comm_release_timestamps(inst); + inst->flush_timestamps = false; + + if (!(b->flags & V4L2_BUF_FLAG_CODECCONFIG)) + rc = msm_comm_store_timestamp(inst, timestamp_us, + b->flags & V4L2_BUF_FLAG_EOS); + + if (rc) + goto unlock; + inst->clk_data.frame_rate = msm_comm_get_max_framerate(inst); + } + if (is_encode_session(inst) && b->type == INPUT_MPLANE) { + if (inst->flush_timestamps) + msm_comm_release_timestamps(inst); + inst->flush_timestamps = false; + + rc = msm_venc_store_timestamp(inst, timestamp_us); + if (rc) + goto unlock; + } + + rc = vb2_qbuf(&q->vb2_bufq, mdev, b); + if (rc) + s_vpr_e(inst->sid, "Failed to qbuf, %d\n", rc); +unlock: + mutex_unlock(&q->lock); + + return rc; +} +EXPORT_SYMBOL(msm_vidc_qbuf); + +int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b) +{ + struct msm_vidc_inst *inst = instance; + int rc = 0; + unsigned int i = 0; + struct buf_queue *q = NULL; + + if (!inst || !b || !valid_v4l2_buffer(b, inst)) { + d_vpr_e("%s: invalid params, %pK %pK\n", + __func__, inst, b); + return -EINVAL; + } + + q = msm_comm_get_vb2q(inst, b->type); + if (!q) { + 
s_vpr_e(inst->sid, "Failed to find buffer queue. type %d\n", + b->type); + return -EINVAL; + } + + mutex_lock(&q->lock); + rc = vb2_dqbuf(&q->vb2_bufq, b, true); + mutex_unlock(&q->lock); + if (rc == -EAGAIN) { + return rc; + } else if (rc) { + s_vpr_e(inst->sid, "Failed to dqbuf, %d\n", rc); + return rc; + } + + for (i = 0; i < b->length; i++) { + b->m.planes[i].reserved[MSM_VIDC_BUFFER_FD] = + b->m.planes[i].m.fd; + b->m.planes[i].reserved[MSM_VIDC_DATA_OFFSET] = + b->m.planes[i].data_offset; + } + if (b->type == OUTPUT_MPLANE) { + rc = msm_comm_fetch_input_tag(&inst->fbd_data, b->index, + &b->m.planes[0].reserved[MSM_VIDC_INPUT_TAG_1], + &b->m.planes[0].reserved[MSM_VIDC_INPUT_TAG_2], + inst->sid); + if (rc) { + s_vpr_e(inst->sid, "Failed to fetch input tag"); + return -EINVAL; + } + } + if (is_decode_session(inst) && + b->type == OUTPUT_MPLANE && + !(b->flags & V4L2_BUF_FLAG_CODECCONFIG) && + is_ts_reorder_allowed(inst)) + msm_comm_fetch_ts_framerate(inst, b); + + return rc; +} +EXPORT_SYMBOL(msm_vidc_dqbuf); + +int msm_vidc_streamon(void *instance, enum v4l2_buf_type i) +{ + struct msm_vidc_inst *inst = instance; + int rc = 0; + struct buf_queue *q; + + if (!inst) + return -EINVAL; + + q = msm_comm_get_vb2q(inst, i); + if (!q) { + d_vpr_e("Failed to find buffer queue. type %d\n", i); + return -EINVAL; + } + s_vpr_h(inst->sid, "Calling streamon\n"); + mutex_lock(&q->lock); + rc = vb2_streamon(&q->vb2_bufq, i); + mutex_unlock(&q->lock); + if (rc) { + s_vpr_e(inst->sid, "streamon failed on port: %d\n", i); + msm_comm_kill_session(inst); + } + return rc; +} +EXPORT_SYMBOL(msm_vidc_streamon); + +int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i) +{ + struct msm_vidc_inst *inst = instance; + int rc = 0; + struct buf_queue *q; + + if (!inst) + return -EINVAL; + + q = msm_comm_get_vb2q(inst, i); + if (!q) { + s_vpr_e(inst->sid, "Failed to find buffer queue. 
type %d\n", i); + return -EINVAL; + } + + if (!inst->in_reconfig) { + s_vpr_h(inst->sid, "%s: inst %pK release resources\n", + __func__, inst); + rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); + if (rc) + s_vpr_e(inst->sid, + "%s: inst %pK move to rel res done failed\n", + __func__, inst); + } + + s_vpr_h(inst->sid, "Calling streamoff\n"); + mutex_lock(&q->lock); + rc = vb2_streamoff(&q->vb2_bufq, i); + mutex_unlock(&q->lock); + if (rc) + s_vpr_e(inst->sid, "streamoff failed on port: %d\n", i); + return rc; +} +EXPORT_SYMBOL(msm_vidc_streamoff); + +int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize) +{ + struct msm_vidc_inst *inst = instance; + struct msm_vidc_capability *capability = NULL; + + if (!inst || !fsize) { + d_vpr_e("%s: invalid parameter: %pK %pK\n", + __func__, inst, fsize); + return -EINVAL; + } + if (!inst->core) + return -EINVAL; + + capability = &inst->capability; + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + if(is_grid_session(inst)) { + fsize->stepwise.min_width = + capability->cap[CAP_HEIC_IMAGE_FRAME_WIDTH].min; + fsize->stepwise.max_width = + capability->cap[CAP_HEIC_IMAGE_FRAME_WIDTH].max; + fsize->stepwise.step_width = + capability->cap[CAP_HEIC_IMAGE_FRAME_WIDTH].step_size; + fsize->stepwise.min_height = + capability->cap[CAP_HEIC_IMAGE_FRAME_HEIGHT].min; + fsize->stepwise.max_height = + capability->cap[CAP_HEIC_IMAGE_FRAME_HEIGHT].max; + fsize->stepwise.step_height = + capability->cap[CAP_HEIC_IMAGE_FRAME_HEIGHT].step_size; + + } + else { + fsize->stepwise.min_width = capability->cap[CAP_FRAME_WIDTH].min; + fsize->stepwise.max_width = capability->cap[CAP_FRAME_WIDTH].max; + fsize->stepwise.step_width = + capability->cap[CAP_FRAME_WIDTH].step_size; + fsize->stepwise.min_height = capability->cap[CAP_FRAME_HEIGHT].min; + fsize->stepwise.max_height = capability->cap[CAP_FRAME_HEIGHT].max; + fsize->stepwise.step_height = + capability->cap[CAP_FRAME_HEIGHT].step_size; + } + return 0; +} 
+EXPORT_SYMBOL(msm_vidc_enum_framesizes); + +static void *vidc_get_userptr(struct device *dev, unsigned long vaddr, + unsigned long size, enum dma_data_direction dma_dir) +{ + return (void *)0xdeadbeef; +} + +static void vidc_put_userptr(void *buf_priv) +{ +} + +static const struct vb2_mem_ops msm_vidc_vb2_mem_ops = { + .get_userptr = vidc_get_userptr, + .put_userptr = vidc_put_userptr, +}; + +static void msm_vidc_cleanup_buffer(struct vb2_buffer *vb) +{ + int rc = 0; + struct buf_queue *q = NULL; + struct msm_vidc_inst *inst = NULL; + + if (!vb) { + d_vpr_e("%s: Invalid vb pointer", __func__); + return; + } + + inst = vb2_get_drv_priv(vb->vb2_queue); + if (!inst) { + d_vpr_e("%s: Invalid inst pointer", __func__); + return; + } + + q = msm_comm_get_vb2q(inst, vb->type); + if (!q) { + s_vpr_e(inst->sid, + "%s: Failed to find buffer queue. type %d\n", + __func__, vb->type); + return; + } + + if (q->vb2_bufq.streaming) { + s_vpr_h(inst->sid, "%d PORT is streaming\n", + vb->type); + return; + } + + rc = msm_vidc_release_buffer(inst, vb->type, vb->index); + if (rc) + s_vpr_e(inst->sid, "%s: Failed to release buffers: %d\n", + __func__, rc); +} + +static int msm_vidc_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct msm_vidc_inst *inst; + int rc = 0; + unsigned int i = 0; + struct msm_vidc_format *fmt; + struct v4l2_format *f; + + if (!q || !num_buffers || !num_planes + || !sizes || !q->drv_priv) { + d_vpr_e("Invalid input, q = %pK, %pK, %pK\n", + q, num_buffers, num_planes); + return -EINVAL; + } + inst = q->drv_priv; + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + switch (q->type) { + case INPUT_MPLANE: { + fmt = &inst->fmts[INPUT_PORT]; + if (*num_buffers < fmt->count_min_host) { + s_vpr_h(inst->sid, + "Client passed num buffers %d less than the min_host count %d\n", + *num_buffers, 
fmt->count_min_host); + } + f = &fmt->v4l2_fmt; + *num_planes = f->fmt.pix_mp.num_planes; + if (*num_buffers < SINGLE_INPUT_BUFFER || + *num_buffers > MAX_NUM_INPUT_BUFFERS) + fmt->count_actual = *num_buffers = + SINGLE_INPUT_BUFFER; + for (i = 0; i < *num_planes; i++) + sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage; + + fmt->count_actual = *num_buffers; + } + break; + case OUTPUT_MPLANE: { + fmt = &inst->fmts[OUTPUT_PORT]; + if (inst->session_type != MSM_VIDC_DECODER && + inst->state > MSM_VIDC_LOAD_RESOURCES_DONE) { + if (*num_buffers < fmt->count_min_host) { + s_vpr_h(inst->sid, + "Client passed num buffers %d less than the min_host count %d\n", + *num_buffers, + fmt->count_min_host); + } + } + f = &fmt->v4l2_fmt; + *num_planes = f->fmt.pix_mp.num_planes; + if (*num_buffers < SINGLE_OUTPUT_BUFFER || + *num_buffers > MAX_NUM_OUTPUT_BUFFERS) + fmt->count_actual = *num_buffers = + SINGLE_OUTPUT_BUFFER; + + for (i = 0; i < *num_planes; i++) + sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage; + + fmt->count_actual = *num_buffers; + } + break; + default: + s_vpr_e(inst->sid, "Invalid q type = %d\n", q->type); + rc = -EINVAL; + break; + } + + s_vpr_h(inst->sid, + "queue_setup:type %d num_buffers %d num_planes %d sizes[0] %d sizes[1] %d\n", + q->type, *num_buffers, *num_planes, sizes[0], sizes[1]); + return rc; +} + +static inline int msm_vidc_verify_buffer_counts(struct msm_vidc_inst *inst) +{ + int rc = 0, i = 0; + + /* For decoder No need to sanity till LOAD_RESOURCES */ + if (inst->session_type == MSM_VIDC_DECODER && + (inst->state < MSM_VIDC_LOAD_RESOURCES_DONE || + inst->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)) { + s_vpr_h(inst->sid, "No need to verify buffer counts\n"); + return 0; + } + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements *req = &inst->buff_req.buffer[i]; + + if (req && (req->buffer_type == HAL_BUFFER_OUTPUT)) { + s_vpr_h(inst->sid, "Verifying Buffer : %d\n", + req->buffer_type); + if (req->buffer_count_actual < + 
req->buffer_count_min_host || + req->buffer_count_min_host < + req->buffer_count_min) { + + s_vpr_e(inst->sid, + "Invalid data : Counts mismatch\n"); + s_vpr_e(inst->sid, "Min Count = %d ", + req->buffer_count_min); + s_vpr_e(inst->sid, "Min Host Count = %d ", + req->buffer_count_min_host); + s_vpr_e(inst->sid, "Actual Count = %d\n", + req->buffer_count_actual); + rc = -EINVAL; + break; + } + } + } + return rc; +} + +static int msm_vidc_set_properties(struct msm_vidc_inst *inst) +{ + int rc = 0; + + if (is_decode_session(inst)) + rc = msm_vdec_set_properties(inst); + else if (is_encode_session(inst)) + rc = msm_venc_set_properties(inst); + + return rc; +} + +static bool msm_vidc_set_cvp_metadata(struct msm_vidc_inst *inst) { + + int rc = 0; + u32 value = 0x0; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return false; + } + + if (!is_cvp_supported(inst)) { + s_vpr_h(inst->sid, "%s cvp is not supported\n", __func__); + return true; + } + + if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_CVP) + value = 0x1; + + s_vpr_h(inst->sid, "%s: CVP extradata %d\n", __func__, value); + rc = msm_comm_set_extradata(inst, + HFI_PROPERTY_PARAM_VENC_CVP_METADATA_EXTRADATA, value); + if (rc) { + s_vpr_e(inst->sid, "%s: set CVP extradata failed\n", __func__); + return false; + } + + if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_CVP) + rc = msm_venc_set_cvp_skipratio(inst); + + if (rc) { + s_vpr_e(inst->sid, + "%s: set CVP skip ratio controls failed\n", __func__); + return false; + } + return true; +} + +static inline int start_streaming(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_buffer_size_minimum b; + struct v4l2_format *f; + + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + hdev = inst->core->device; + + rc = msm_vidc_set_properties(inst); + if (rc) { + s_vpr_e(inst->sid, "%s: set props failed\n", __func__); + goto fail_start; + } + + if (is_encode_session(inst)) { + if
(!(msm_vidc_set_cvp_metadata(inst))) + goto fail_start; + } + + b.buffer_type = HFI_BUFFER_OUTPUT; + if (inst->session_type == MSM_VIDC_DECODER && + is_secondary_output_mode(inst)) { + b.buffer_type = HFI_BUFFER_OUTPUT2; + rc = msm_comm_update_dpb_bufreqs(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: set dpb bufreq failed\n", __func__); + goto fail_start; + } + } + + /* Check if current session is under HW capability */ + rc = msm_vidc_check_session_supported(inst); + if (rc) { + s_vpr_e(inst->sid, "This session is not supported\n"); + goto fail_start; + } + + rc = msm_vidc_check_scaling_supported(inst); + if (rc) { + s_vpr_e(inst->sid, "scaling is not supported\n"); + goto fail_start; + } + + /* Decide work mode for current session */ + rc = call_core_op(inst->core, decide_work_mode, inst); + if (rc) { + s_vpr_e(inst->sid, "Failed to decide work mode\n"); + goto fail_start; + } + + /* Decide work route for current session */ + rc = call_core_op(inst->core, decide_work_route, inst); + if (rc) { + s_vpr_e(inst->sid, "Failed to decide work route\n"); + goto fail_start; + } + + /* Decide bse vpp delay after work mode */ + msm_vidc_set_bse_vpp_delay(inst); + + /* Assign Core and LP mode for current session */ + rc = call_core_op(inst->core, decide_core_and_power_mode, inst); + if (rc) { + s_vpr_e(inst->sid, + "This session can't be submitted to HW %pK\n", inst); + goto fail_start; + } + + rc = msm_comm_try_get_bufreqs(inst); + + rc = msm_comm_check_memory_supported(inst); + if (rc) { + s_vpr_e(inst->sid, + "Memory not sufficient to proceed current session\n"); + goto fail_start; + } + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + b.buffer_size = f->fmt.pix_mp.plane_fmt[0].sizeimage; + rc = call_hfi_op(hdev, session_set_property, + inst->session, HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM, + &b, sizeof(b)); + + /* Verify if buffer counts are correct */ + rc = msm_vidc_verify_buffer_counts(inst); + if (rc) { + s_vpr_e(inst->sid, + "This session has mis-match buffer 
counts%pK\n", inst); + goto fail_start; + } + + msm_comm_check_prefetch_sufficient(inst); + + rc = msm_comm_set_scratch_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, "Failed to set scratch buffers: %d\n", rc); + goto fail_start; + } + rc = msm_comm_set_persist_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, "Failed to set persist buffers: %d\n", rc); + goto fail_start; + } + + rc = msm_comm_set_recon_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, "Failed to set recon buffers: %d\n", rc); + goto fail_start; + } + + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_SECONDARY) { + rc = msm_comm_set_dpb_only_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, + "Failed to set output buffers: %d\n", rc); + goto fail_start; + } + } + + inst->batch.enable = is_batching_allowed(inst); + s_vpr_hp(inst->sid, "%s: batching %s for inst %pK\n", + __func__, inst->batch.enable ? "enabled" : "disabled", inst); + + msm_dcvs_try_enable(inst); + + /* + * For seq_changed_insufficient, driver should set session_continue + * to firmware after the following sequence + * - driver raises insufficient event to v4l2 client + * - all output buffers have been flushed and freed + * - v4l2 client queries buffer requirements and splits/combines OPB-DPB + * - v4l2 client sets new set of buffers to firmware + * - v4l2 client issues CONTINUE to firmware to resume decoding of + * submitted ETBs. 
+ */ + rc = msm_comm_session_continue(inst); + if (rc) + goto fail_start; + + msm_comm_scale_clocks_and_bus(inst, 1); + + rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE); + if (rc) { + s_vpr_e(inst->sid, + "Failed to move inst: %pK to start done state\n", inst); + goto fail_start; + } + + msm_clock_data_reset(inst); + + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_SECONDARY) { + rc = msm_comm_queue_dpb_only_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, + "Failed to queue output buffers: %d\n", rc); + goto fail_start; + } + } + +fail_start: + if (rc) + s_vpr_e(inst->sid, "%s: inst %pK failed to start\n", + __func__, inst); + return rc; +} + +static int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct msm_vidc_inst *inst; + int rc = 0; + struct hfi_device *hdev; + + if (!q || !q->drv_priv) { + d_vpr_e("Invalid input, q = %pK\n", q); + return -EINVAL; + } + inst = q->drv_priv; + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + hdev = inst->core->device; + s_vpr_h(inst->sid, "Streamon called on: %d capability for inst: %pK\n", + q->type, inst); + switch (q->type) { + case INPUT_MPLANE: + if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming) + rc = start_streaming(inst); + break; + case OUTPUT_MPLANE: + if (inst->bufq[INPUT_PORT].vb2_bufq.streaming) + rc = start_streaming(inst); + break; + default: + s_vpr_e(inst->sid, + "Queue type is not supported: %d\n", q->type); + rc = -EINVAL; + goto stream_start_failed; + } + if (rc) { + s_vpr_e(inst->sid, "Streamon failed: %d, inst: %pK\n", + q->type, inst); + goto stream_start_failed; + } + + rc = msm_comm_qbufs(inst); + if (rc) { + s_vpr_e(inst->sid, + "Failed to commit buffers queued before STREAM_ON: %d\n", + rc); + goto stream_start_failed; + } + + rc = msm_vidc_send_pending_eos_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, "Failed : Send pending EOS: %d\n", rc); + goto stream_start_failed; + } + 
+stream_start_failed: + if (rc) { + struct msm_vidc_buffer *temp, *next; + struct vb2_buffer *vb; + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, + list) { + if (temp->vvb.vb2_buf.type != q->type) + continue; + /* + * queued_list lock is already acquired before + * vb2_stream so no need to acquire it again. + */ + list_for_each_entry(vb, &q->queued_list, queued_entry) { + if (msm_comm_compare_vb2_planes(inst, temp, + vb)) { + print_vb2_buffer("return vb", inst, vb); + vb2_buffer_done(vb, + VB2_BUF_STATE_QUEUED); + break; + } + } + msm_comm_unmap_vidc_buffer(inst, temp); + list_del(&temp->list); + kref_put_mbuf(temp); + } + mutex_unlock(&inst->registeredbufs.lock); + } + return rc; +} + +static inline int stop_streaming(struct msm_vidc_inst *inst) +{ + int rc = 0; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + + rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); + if (rc) + s_vpr_e(inst->sid, "Failed to move inst: %pK to state %d\n", + inst, MSM_VIDC_RELEASE_RESOURCES_DONE); + + if (is_encode_session(inst)) { + inst->all_intra = false; + } + + msm_clock_data_reset(inst); + + return rc; +} + +static void msm_vidc_stop_streaming(struct vb2_queue *q) +{ + struct msm_vidc_inst *inst; + int rc = 0; + + if (!q || !q->drv_priv) { + d_vpr_e("Invalid input, q = %pK\n", q); + return; + } + + inst = q->drv_priv; + s_vpr_h(inst->sid, "Streamoff called on: %d capability\n", q->type); + switch (q->type) { + case INPUT_MPLANE: + if (!inst->bufq[OUTPUT_PORT].vb2_bufq.streaming) + rc = stop_streaming(inst); + break; + case OUTPUT_MPLANE: + if (!inst->bufq[INPUT_PORT].vb2_bufq.streaming) + rc = stop_streaming(inst); + break; + default: + s_vpr_e(inst->sid, "Q-type is not supported: %d\n", q->type); + rc = -EINVAL; + break; + } + + msm_comm_scale_clocks_and_bus(inst, 1); + + if (rc) + s_vpr_e(inst->sid, + "Failed 
STOP Streaming inst = %pK on cap = %d\n", + inst, q->type); +} + +static int msm_vidc_queue_buf(struct msm_vidc_inst *inst, + struct vb2_buffer *vb2) +{ + int rc = 0; + struct msm_vidc_buffer *mbuf; + + if (!inst || !vb2) { + d_vpr_e("%s: invalid params %pK, %pK\n", + __func__, inst, vb2); + return -EINVAL; + } + + mbuf = msm_comm_get_vidc_buffer(inst, vb2); + if (IS_ERR_OR_NULL(mbuf)) { + /* + * if the buffer has RBR_PENDING flag (-EEXIST) then don't queue + * it now, it will be queued via msm_comm_qbuf_rbr() as part of + * RBR event processing. + */ + if (PTR_ERR(mbuf) == -EEXIST) + return 0; + s_vpr_e(inst->sid, "%s: failed to get vidc-buf\n", __func__); + return -EINVAL; + } + if (!kref_get_mbuf(inst, mbuf)) { + s_vpr_e(inst->sid, "%s: mbuf not found\n", __func__); + return -EINVAL; + } + rc = msm_comm_qbuf(inst, mbuf); + if (rc) + s_vpr_e(inst->sid, "%s: failed qbuf\n", __func__); + kref_put_mbuf(mbuf); + + return rc; +} + +static int msm_vidc_queue_buf_decode_batch(struct msm_vidc_inst *inst, + struct vb2_buffer *vb2) +{ + int rc; + struct msm_vidc_buffer *mbuf; + + if (!inst || !vb2) { + d_vpr_e("%s: invalid params %pK, %pK\n", + __func__, inst, vb2); + return -EINVAL; + } + + mbuf = msm_comm_get_vidc_buffer(inst, vb2); + if (IS_ERR_OR_NULL(mbuf)) { + s_vpr_e(inst->sid, "%s: failed to get vidc-buf\n", __func__); + return -EINVAL; + } + if (!kref_get_mbuf(inst, mbuf)) { + s_vpr_e(inst->sid, "%s: mbuf not found\n", __func__); + return -EINVAL; + } + /* + * If this buffer has RBR_EPNDING then it will not be queued + * but it may trigger full batch queuing in below function. 
+ */ + rc = msm_comm_qbuf_decode_batch(inst, mbuf); + if (rc) + s_vpr_e(inst->sid, "%s: failed qbuf\n", __func__); + kref_put_mbuf(mbuf); + + return rc; +} + +static int msm_vidc_queue_buf_batch(struct msm_vidc_inst *inst, + struct vb2_buffer *vb2) +{ + int rc; + + if (!inst || !vb2) { + d_vpr_e("%s: invalid params %pK, %pK\n", + __func__, inst, vb2); + return -EINVAL; + } + + if (inst->session_type == MSM_VIDC_DECODER && + vb2->type == OUTPUT_MPLANE) + rc = msm_vidc_queue_buf_decode_batch(inst, vb2); + else + rc = msm_vidc_queue_buf(inst, vb2); + + return rc; +} + +static void msm_vidc_buf_queue(struct vb2_buffer *vb2) +{ + int rc = 0; + struct msm_vidc_inst *inst = NULL; + + inst = vb2_get_drv_priv(vb2->vb2_queue); + if (!inst) { + d_vpr_e("%s: invalid inst\n", __func__); + return; + } + + if (inst->batch.enable) + rc = msm_vidc_queue_buf_batch(inst, vb2); + else + rc = msm_vidc_queue_buf(inst, vb2); + + if (rc) { + print_vb2_buffer("failed vb2-qbuf", inst, vb2); + vb2_buffer_done(vb2, VB2_BUF_STATE_DONE); + msm_vidc_queue_v4l2_event(inst, + V4L2_EVENT_MSM_VIDC_SYS_ERROR); + } +} + +static const struct vb2_ops msm_vidc_vb2q_ops = { + .queue_setup = msm_vidc_queue_setup, + .start_streaming = msm_vidc_start_streaming, + .buf_queue = msm_vidc_buf_queue, + .buf_cleanup = msm_vidc_cleanup_buffer, + .stop_streaming = msm_vidc_stop_streaming, +}; + +static inline int vb2_bufq_init(struct msm_vidc_inst *inst, + enum v4l2_buf_type type, enum session_type sess) +{ + struct vb2_queue *q = NULL; + + if (type == OUTPUT_MPLANE) { + q = &inst->bufq[OUTPUT_PORT].vb2_bufq; + } else if (type == INPUT_MPLANE) { + q = &inst->bufq[INPUT_PORT].vb2_bufq; + } else { + s_vpr_e(inst->sid, "buf_type = %d not recognised\n", type); + return -EINVAL; + } + + q->type = type; + q->io_modes = VB2_MMAP | VB2_USERPTR; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + q->ops = &msm_vidc_vb2q_ops; + + q->mem_ops = &msm_vidc_vb2_mem_ops; + q->drv_priv = inst; + q->allow_zero_bytesused = 
!V4L2_TYPE_IS_OUTPUT(type); + q->copy_timestamp = 1; + return vb2_queue_init(q); +} + +static int setup_event_queue(void *inst, + struct video_device *pvdev) +{ + struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst; + + v4l2_fh_init(&vidc_inst->event_handler, pvdev); + v4l2_fh_add(&vidc_inst->event_handler); + + return 0; +} + +int msm_vidc_subscribe_event(void *inst, + const struct v4l2_event_subscription *sub) +{ + int rc = 0; + struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst; + + if (!inst || !sub) + return -EINVAL; + + rc = v4l2_event_subscribe(&vidc_inst->event_handler, + sub, MAX_EVENTS, NULL); + return rc; +} +EXPORT_SYMBOL(msm_vidc_subscribe_event); + +int msm_vidc_unsubscribe_event(void *inst, + const struct v4l2_event_subscription *sub) +{ + int rc = 0; + struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst; + + if (!inst || !sub) + return -EINVAL; + + rc = v4l2_event_unsubscribe(&vidc_inst->event_handler, sub); + return rc; +} +EXPORT_SYMBOL(msm_vidc_unsubscribe_event); + +int msm_vidc_dqevent(void *inst, struct v4l2_event *event) +{ + int rc = 0; + struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst; + + if (!inst || !event) + return -EINVAL; + + rc = v4l2_event_dequeue(&vidc_inst->event_handler, event, false); + return rc; +} +EXPORT_SYMBOL(msm_vidc_dqevent); + +static int msm_vidc_try_set_ctrl(void *instance, struct v4l2_ctrl *ctrl) +{ + struct msm_vidc_inst *inst = instance; + + if (inst->session_type == MSM_VIDC_DECODER) + return msm_vdec_s_ctrl(instance, ctrl); + else if (inst->session_type == MSM_VIDC_ENCODER) + return msm_venc_s_ctrl(instance, ctrl); + return -EINVAL; +} + +static int msm_vidc_op_s_ctrl(struct v4l2_ctrl *ctrl) +{ + + int rc = 0; + struct msm_vidc_inst *inst; + const char *ctrl_name = NULL; + + if (!ctrl) { + d_vpr_e("%s: invalid parameters for ctrl\n", __func__); + return -EINVAL; + } + + inst = container_of(ctrl->handler, + struct msm_vidc_inst, ctrl_handler); + if (!inst) { + 
d_vpr_e("%s: invalid parameters for inst\n", __func__); + return -EINVAL; + } + + rc = msm_vidc_try_set_ctrl(inst, ctrl); + if (rc) { + s_vpr_e(inst->sid, "Failed setting %x\n", ctrl->id); + ctrl_name = v4l2_ctrl_get_name(ctrl->id); + s_vpr_e(inst->sid, "Failed setting control: Inst = %pK (%s)\n", + inst, ctrl_name ? ctrl_name : "Invalid ctrl"); + } + + return rc; +} + +static int try_get_ctrl_for_instance(struct msm_vidc_inst *inst, + struct v4l2_ctrl *ctrl) +{ + int rc = 0; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + ctrl->val = msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_H264_PROFILE, + inst->profile, inst->sid); + break; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + ctrl->val = msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_HEVC_PROFILE, + inst->profile, inst->sid); + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + ctrl->val = msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + inst->level, inst->sid); + break; + case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL: + ctrl->val = msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL, + inst->level, inst->sid); + break; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + ctrl->val = msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_HEVC_LEVEL, + inst->level, inst->sid); + break; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + ctrl->val = inst->fmts[OUTPUT_PORT].count_min_host; + s_vpr_h(inst->sid, "g_min: OUTPUT_PORT count_min_host %d\n", + ctrl->val); + break; + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + ctrl->val = inst->fmts[INPUT_PORT].count_min_host; + s_vpr_h(inst->sid, "g_min: INPUT_PORT count_min_host %d\n", + ctrl->val); + break; + case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA: + ctrl->val = inst->prop.extradata_ctrls; + break; + case V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE: + { + uint32_t vpu_ver; + + if (!inst->core || !inst->core->platform_data) + return -EINVAL; + vpu_ver = inst->core->platform_data->vpu_ver; + ctrl->val = (vpu_ver == VPU_VERSION_IRIS1 || + vpu_ver == VPU_VERSION_IRIS2 || + vpu_ver == 
VPU_VERSION_IRIS2_1) ? + V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE : + V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BIT; + break; + } + default: + break; + } + + return rc; +} + +static const struct v4l2_ctrl_ops msm_vidc_ctrl_ops = { + + .s_ctrl = msm_vidc_op_s_ctrl, +}; + +static struct msm_vidc_inst_smem_ops msm_vidc_smem_ops = { + .smem_map_dma_buf = msm_smem_map_dma_buf, + .smem_unmap_dma_buf = msm_smem_unmap_dma_buf, + .smem_prefetch = msm_smem_memory_prefetch, + .smem_drain = msm_smem_memory_drain, +}; + +static void close_helper(struct kref *kref) +{ + struct msm_vidc_inst *inst = container_of(kref, + struct msm_vidc_inst, kref); + + msm_vidc_destroy(inst); +} + +void *msm_vidc_open(int core_id, int session_type) +{ + struct msm_vidc_inst *inst = NULL; + struct msm_vidc_core *core = NULL; + int rc = 0; + int i = 0; + + if (core_id >= MSM_VIDC_CORES_MAX || + session_type >= MSM_VIDC_MAX_DEVICES) { + d_vpr_e("Invalid input, core_id = %d, session = %d\n", + core_id, session_type); + goto err_invalid_core; + } + core = get_vidc_core(core_id); + if (!core) { + d_vpr_e("Failed to find core for core_id = %d\n", core_id); + goto err_invalid_core; + } + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) { + d_vpr_e("Failed to allocate memory\n"); + rc = -ENOMEM; + goto err_invalid_core; + } + mutex_lock(&core->lock); + rc = get_sid(&inst->sid, session_type); + mutex_unlock(&core->lock); + if (rc) { + d_vpr_e("Total instances count reached to max value\n"); + goto err_invalid_sid; + } + + pr_info(VIDC_DBG_TAG "Opening video instance: %pK, %d\n", + "high", inst->sid, get_codec_name(inst->sid), + inst, session_type); + mutex_init(&inst->sync_lock); + mutex_init(&inst->bufq[OUTPUT_PORT].lock); + mutex_init(&inst->bufq[INPUT_PORT].lock); + mutex_init(&inst->lock); + mutex_init(&inst->ubwc_stats_lock); + + INIT_MSM_VIDC_LIST(&inst->scratchbufs); + INIT_MSM_VIDC_LIST(&inst->input_crs); + INIT_MSM_VIDC_LIST(&inst->persistbufs); + INIT_MSM_VIDC_LIST(&inst->pending_getpropq); + 
INIT_MSM_VIDC_LIST(&inst->outputbufs); + INIT_MSM_VIDC_LIST(&inst->registeredbufs); + INIT_MSM_VIDC_LIST(&inst->refbufs); + INIT_MSM_VIDC_LIST(&inst->eosbufs); + INIT_MSM_VIDC_LIST(&inst->etb_data); + INIT_MSM_VIDC_LIST(&inst->fbd_data); + INIT_MSM_VIDC_LIST(&inst->window_data); + INIT_MSM_VIDC_LIST(&inst->timestamps); + + INIT_DELAYED_WORK(&inst->batch_work, msm_vidc_batch_handler); + kref_init(&inst->kref); + + inst->session_type = session_type; + inst->state = MSM_VIDC_CORE_UNINIT_DONE; + inst->core = core; + inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT; + inst->clk_data.dpb_fourcc = V4L2_PIX_FMT_NV12_UBWC; + inst->clk_data.opb_fourcc = V4L2_PIX_FMT_NV12_UBWC; + inst->bit_depth = MSM_VIDC_BIT_DEPTH_8; + inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE; + inst->colour_space = MSM_VIDC_BT601_6_525; + inst->smem_ops = &msm_vidc_smem_ops; + inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR; + inst->dpb_extra_binfo = NULL; + inst->all_intra = false; + inst->max_filled_len = 0; + inst->entropy_mode = HFI_H264_ENTROPY_CABAC; + inst->full_range = COLOR_RANGE_UNSPECIFIED; + inst->bse_vpp_delay = DEFAULT_BSE_VPP_DELAY; + inst->first_reconfig_done = 0; + inst->active = true; + inst->has_bframe = 0; + inst->boost_qp_enabled = false; + inst->boost_min_qp = 0; + inst->boost_max_qp = 0; + + for (i = SESSION_MSG_INDEX(SESSION_MSG_START); + i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) { + init_completion(&inst->completions[i]); + } + + if (session_type == MSM_VIDC_DECODER) { + msm_vdec_inst_init(inst); + rc = msm_vdec_ctrl_init(inst, &msm_vidc_ctrl_ops); + } else if (session_type == MSM_VIDC_ENCODER) { + msm_venc_inst_init(inst); + rc = msm_venc_ctrl_init(inst, &msm_vidc_ctrl_ops); + } + if (rc) { + s_vpr_e(inst->sid, "Failed control initialization\n"); + goto fail_bufq_capture; + } + + rc = vb2_bufq_init(inst, OUTPUT_MPLANE, session_type); + if (rc) { + s_vpr_e(inst->sid, + "Failed to initialize vb2 queue on capture port\n"); + goto fail_bufq_capture; + } + rc = 
vb2_bufq_init(inst, INPUT_MPLANE, session_type); + if (rc) { + s_vpr_e(inst->sid, + "Failed to initialize vb2 queue on output port\n"); + goto fail_bufq_output; + } + + setup_event_queue(inst, &core->vdev[session_type].vdev); + + mutex_lock(&core->lock); + list_add_tail(&inst->list, &core->instances); + mutex_unlock(&core->lock); + + rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE); + if (rc) { + s_vpr_e(inst->sid, + "Failed to move video instance to init state\n"); + kref_put(&inst->kref, close_helper); + inst = NULL; + goto err_invalid_core; + } + + if (msm_comm_check_for_inst_overload(core)) { + s_vpr_e(inst->sid, + "Instance count reached Max limit, rejecting session\n"); + goto fail_init; + } + + msm_comm_scale_clocks_and_bus(inst, 1); + + inst->debugfs_root = + msm_vidc_debugfs_init_inst(inst, core->debugfs_root); + + return inst; +fail_init: + mutex_lock(&core->lock); + list_del(&inst->list); + mutex_unlock(&core->lock); + + v4l2_fh_del(&inst->event_handler); + v4l2_fh_exit(&inst->event_handler); + vb2_queue_release(&inst->bufq[INPUT_PORT].vb2_bufq); +fail_bufq_output: + vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq); +fail_bufq_capture: + msm_comm_ctrl_deinit(inst); + mutex_destroy(&inst->ubwc_stats_lock); + mutex_destroy(&inst->sync_lock); + mutex_destroy(&inst->bufq[OUTPUT_PORT].lock); + mutex_destroy(&inst->bufq[INPUT_PORT].lock); + mutex_destroy(&inst->lock); + + DEINIT_MSM_VIDC_LIST(&inst->scratchbufs); + DEINIT_MSM_VIDC_LIST(&inst->persistbufs); + DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq); + DEINIT_MSM_VIDC_LIST(&inst->outputbufs); + DEINIT_MSM_VIDC_LIST(&inst->registeredbufs); + DEINIT_MSM_VIDC_LIST(&inst->refbufs); + DEINIT_MSM_VIDC_LIST(&inst->eosbufs); + DEINIT_MSM_VIDC_LIST(&inst->input_crs); + DEINIT_MSM_VIDC_LIST(&inst->etb_data); + DEINIT_MSM_VIDC_LIST(&inst->fbd_data); + DEINIT_MSM_VIDC_LIST(&inst->window_data); + DEINIT_MSM_VIDC_LIST(&inst->timestamps); + +err_invalid_sid: + put_sid(inst->sid); + kfree(inst); + inst = NULL;
+err_invalid_core: + return inst; +} +EXPORT_SYMBOL(msm_vidc_open); + +static void msm_vidc_cleanup_instance(struct msm_vidc_inst *inst) +{ + struct msm_vidc_buffer *temp, *dummy; + struct getprop_buf *temp_prop, *dummy_prop; + struct list_head *ptr, *next; + enum vidc_ports ports[] = {INPUT_PORT, OUTPUT_PORT}; + int c = 0; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + for (c = 0; c < ARRAY_SIZE(ports); ++c) { + enum vidc_ports port = ports[c]; + + mutex_lock(&inst->bufq[port].lock); + list_for_each_safe(ptr, next, + &inst->bufq[port].vb2_bufq.queued_list) { + struct vb2_buffer *vb = container_of(ptr, + struct vb2_buffer, queued_entry); + if (vb->state == VB2_BUF_STATE_ACTIVE) { + vb->planes[0].bytesused = 0; + print_vb2_buffer("undequeud vb2", inst, vb); + vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); + } + } + mutex_unlock(&inst->bufq[port].lock); + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, dummy, &inst->registeredbufs.list, + list) { + print_vidc_buffer(VIDC_ERR, "undequeud buf", inst, temp); + msm_comm_unmap_vidc_buffer(inst, temp); + list_del(&temp->list); + kref_put_mbuf(temp); + } + mutex_unlock(&inst->registeredbufs.lock); + + cancel_batch_work(inst); + + msm_comm_free_input_cr_table(inst); + + if (msm_comm_release_scratch_buffers(inst, false)) + s_vpr_e(inst->sid, "Failed to release scratch buffers\n"); + + if (msm_comm_release_recon_buffers(inst)) + s_vpr_e(inst->sid, "Failed to release recon buffers\n"); + + if (msm_comm_release_persist_buffers(inst)) + s_vpr_e(inst->sid, "Failed to release persist buffers\n"); + + if (msm_comm_release_input_tag(inst)) + s_vpr_e(inst->sid, "Failed to release input_tag buffers\n"); + + msm_comm_release_window_data(inst); + + msm_comm_release_eos_buffers(inst); + + msm_comm_release_timestamps(inst); + + if (msm_comm_release_dpb_only_buffers(inst, true)) + s_vpr_e(inst->sid, "Failed to release output buffers\n"); + + if (inst->extradata_handle) + 
msm_comm_smem_free(inst, inst->extradata_handle); + + mutex_lock(&inst->pending_getpropq.lock); + if (!list_empty(&inst->pending_getpropq.list)) { + s_vpr_e(inst->sid, "pending_getpropq not empty\n"); + list_for_each_entry_safe(temp_prop, dummy_prop, + &inst->pending_getpropq.list, list) { + kfree(temp_prop->data); + list_del(&temp_prop->list); + kfree(temp_prop); + } + } + mutex_unlock(&inst->pending_getpropq.lock); +} + +int msm_vidc_destroy(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core; + int i = 0; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + core = inst->core; + + for (i = 0; i < MAX_PORT_NUM; i++) + vb2_queue_release(&inst->bufq[i].vb2_bufq); + + mutex_lock(&core->lock); + /* inst->list lives in core->instances */ + list_del(&inst->list); + mutex_unlock(&core->lock); + + msm_comm_ctrl_deinit(inst); + + v4l2_fh_del(&inst->event_handler); + v4l2_fh_exit(&inst->event_handler); + + DEINIT_MSM_VIDC_LIST(&inst->scratchbufs); + DEINIT_MSM_VIDC_LIST(&inst->persistbufs); + DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq); + DEINIT_MSM_VIDC_LIST(&inst->outputbufs); + DEINIT_MSM_VIDC_LIST(&inst->registeredbufs); + DEINIT_MSM_VIDC_LIST(&inst->refbufs); + DEINIT_MSM_VIDC_LIST(&inst->eosbufs); + DEINIT_MSM_VIDC_LIST(&inst->input_crs); + DEINIT_MSM_VIDC_LIST(&inst->etb_data); + DEINIT_MSM_VIDC_LIST(&inst->fbd_data); + DEINIT_MSM_VIDC_LIST(&inst->window_data); + DEINIT_MSM_VIDC_LIST(&inst->timestamps); + + mutex_destroy(&inst->ubwc_stats_lock); + mutex_destroy(&inst->sync_lock); + mutex_destroy(&inst->bufq[OUTPUT_PORT].lock); + mutex_destroy(&inst->bufq[INPUT_PORT].lock); + mutex_destroy(&inst->lock); + + msm_vidc_debugfs_deinit_inst(inst); + + pr_info(VIDC_DBG_TAG "Closed video instance: %pK\n", + "high", inst->sid, get_codec_name(inst->sid), + inst); + put_sid(inst->sid); + kfree(inst); + return 0; +} + +int msm_vidc_close(void *instance) +{ + struct msm_vidc_inst *inst = instance; + int rc = 0; + + if 
(!inst || !inst->core) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + /* + * Make sure that HW stop working on these buffers that + * we are going to free. + */ + rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); + if (rc) + s_vpr_e(inst->sid, "Failed: move to rel resource done state\n"); + + msm_vidc_cleanup_instance(inst); + + rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT); + if (rc) { + s_vpr_e(inst->sid, + "Failed to move inst %pK to uninit state\n", inst); + rc = msm_comm_force_cleanup(inst); + } + + msm_comm_session_clean(inst); + msm_comm_memory_drain(inst); + + kref_put(&inst->kref, close_helper); + return 0; +} +EXPORT_SYMBOL(msm_vidc_close); + +int msm_vidc_suspend(int core_id) +{ + return msm_comm_suspend(core_id); +} +EXPORT_SYMBOL(msm_vidc_suspend); + diff --git a/techpack/video/msm/vidc/msm_vidc.h b/techpack/video/msm/vidc/msm_vidc.h new file mode 100644 index 000000000000..136cea68a7e9 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _MSM_VIDC_H_ +#define _MSM_VIDC_H_ + +#include +#include +#include "vidc/media/msm_vidc_utils.h" +#include + +#define HAL_BUFFER_MAX 0xe +#define CVP_FRAME_RATE_MAX (60) +#define MEMORY_REGIONS_MAX 30 + +enum smem_type { + SMEM_DMA = 1, +}; + +enum smem_prop { + SMEM_UNCACHED = 0x1, + SMEM_CACHED = 0x2, + SMEM_SECURE = 0x4, + SMEM_ADSP = 0x8, +}; + +/* NOTE: if you change this enum you MUST update the + * "buffer-type-tz-usage-table" for any affected target + * in arch/arm/boot/dts/.dtsi + */ +enum hal_buffer { + HAL_BUFFER_NONE = 0x0, + HAL_BUFFER_INPUT = 0x1, + HAL_BUFFER_OUTPUT = 0x2, + HAL_BUFFER_OUTPUT2 = 0x4, + HAL_BUFFER_EXTRADATA_INPUT = 0x8, + HAL_BUFFER_EXTRADATA_OUTPUT = 0x10, + HAL_BUFFER_EXTRADATA_OUTPUT2 = 0x20, + HAL_BUFFER_INTERNAL_SCRATCH = 0x40, + HAL_BUFFER_INTERNAL_SCRATCH_1 = 0x80, + HAL_BUFFER_INTERNAL_SCRATCH_2 = 0x100, + HAL_BUFFER_INTERNAL_PERSIST = 0x200, + HAL_BUFFER_INTERNAL_PERSIST_1 = 0x400, + HAL_BUFFER_INTERNAL_CMD_QUEUE = 0x800, + HAL_BUFFER_INTERNAL_RECON = 0x1000, +}; + +enum msm_vidc_blur_type { + MSM_VIDC_BLUR_INTERNAL = 0x0, + MSM_VIDC_BLUR_EXTERNAL_DYNAMIC = 0x1, + MSM_VIDC_BLUR_DISABLE = 0x2, +}; + +struct dma_mapping_info { + struct device *dev; + struct iommu_domain *domain; + struct sg_table *table; + struct dma_buf_attachment *attach; + struct dma_buf *buf; + void *cb_info; +}; + +struct msm_smem { + u32 refcount; + int fd; + void *dma_buf; + void *kvaddr; + u32 device_addr; + dma_addr_t dma_handle; + unsigned int offset; + unsigned int size; + unsigned long flags; + enum hal_buffer buffer_type; + struct dma_mapping_info mapping_info; +}; + +enum smem_cache_ops { + SMEM_CACHE_CLEAN, + SMEM_CACHE_INVALIDATE, + SMEM_CACHE_CLEAN_INVALIDATE, +}; + +struct memory_regions { + u32 num_regions; + struct { + u64 size; + u32 vmid; + } region[MEMORY_REGIONS_MAX]; +}; + +enum memory_ops { + MEMORY_PREFETCH = 1, + MEMORY_DRAIN, +}; + +enum core_id { + MSM_VIDC_CORE_VENUS = 0, + MSM_VIDC_CORE_Q6, + MSM_VIDC_CORES_MAX, 
+}; +enum session_type { + MSM_VIDC_ENCODER = 0, + MSM_VIDC_DECODER, + MSM_VIDC_UNKNOWN, + MSM_VIDC_MAX_DEVICES = MSM_VIDC_UNKNOWN, +}; + +enum load_type { + MSM_VIDC_VIDEO = 0, + MSM_VIDC_IMAGE, +}; + +union msm_v4l2_cmd { + struct v4l2_decoder_cmd dec; + struct v4l2_encoder_cmd enc; +}; + +void *msm_vidc_open(int core_id, int session_type); +int msm_vidc_close(void *instance); +int msm_vidc_suspend(int core_id); +int msm_vidc_querycap(void *instance, struct v4l2_capability *cap); +int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f); +int msm_vidc_s_fmt(void *instance, struct v4l2_format *f); +int msm_vidc_g_fmt(void *instance, struct v4l2_format *f); +int msm_vidc_s_ctrl(void *instance, struct v4l2_control *a); +int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a); +int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *a); +int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a); +int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b); +int msm_vidc_release_buffer(void *instance, int buffer_type, + unsigned int buffer_index); +int msm_vidc_qbuf(void *instance, struct media_device *mdev, + struct v4l2_buffer *b); +int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b); +int msm_vidc_streamon(void *instance, enum v4l2_buf_type i); +int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl); +int msm_vidc_query_menu(void *instance, struct v4l2_querymenu *qmenu); +int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i); +int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd); +int msm_vidc_poll(void *instance, struct file *filp, + struct poll_table_struct *pt); +int msm_vidc_subscribe_event(void *instance, + const struct v4l2_event_subscription *sub); +int msm_vidc_unsubscribe_event(void *instance, + const struct v4l2_event_subscription *sub); +int msm_vidc_dqevent(void *instance, struct v4l2_event *event); +int msm_vidc_g_crop(void *instance, struct v4l2_crop *a); +int 
msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize); +#endif diff --git a/techpack/video/msm/vidc/msm_vidc_buffer_calculations.c b/techpack/video/msm/vidc/msm_vidc_buffer_calculations.c new file mode 100644 index 000000000000..997c1ee62749 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_buffer_calculations.c @@ -0,0 +1,1953 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#include "msm_vidc_debug.h" +#include "msm_vidc_common.h" +#include "msm_vidc_buffer_calculations.h" +#include "msm_vidc_clocks.h" + +#define VP9_REFERENCE_COUNT 8 + +/* minimum number of input buffers */ +#define MIN_INPUT_BUFFERS 4 + +/* Decoder buffer count macros */ +/* total input buffers in case of decoder batch */ +#define BATCH_DEC_TOTAL_INPUT_BUFFERS 6 + +/* extra output buffers in case of decoder batch */ +#define BATCH_DEC_EXTRA_OUTPUT_BUFFERS 6 + +/* Encoder buffer count macros */ +/* minimum number of output buffers */ +#define MIN_ENC_OUTPUT_BUFFERS 4 + +/* extra output buffers for encoder HEIF usecase */ +#define HEIF_ENC_TOTAL_OUTPUT_BUFFERS 12 + +/* extra buffer count for heif decoder */ +#define HEIF_DEC_TOTAL_INPUT_BUFFERS 12 +#define HEIF_DEC_EXTRA_OUTPUT_BUFFERS 8 + +#define HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_WIDTH 32 +#define HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT 8 +#define HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_WIDTH 16 +#define HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_HEIGHT 8 +#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC_Y_TILE_WIDTH 48 +#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC_Y_TILE_HEIGHT 4 +#define BUFFER_ALIGNMENT_4096_BYTES 4096 +#define VENUS_METADATA_STRIDE_MULTIPLE 64 +#define VENUS_METADATA_HEIGHT_MULTIPLE 16 +#define HFI_UBWC_CALC_METADATA_PLANE_STRIDE \ + ((metadataStride, width, metadataStrideMultiple, tileWidthInPels) \ + metadataStride = ALIGN(((width + (tileWidthInPels - 1)) / \ + tileWidthInPels), metadataStrideMultiple)) +#define 
HFI_UBWC_METADATA_PLANE_BUFHEIGHT \ + ((metadataBufHeight, height, metadataHeightMultiple, tileHeightInPels) \ + metadataBufHeight = ALIGN(((height + (tileHeightInPels - 1)) / \ + tileHeightInPels), metadataHeightMultiple)) +#define HFI_UBWC_METADATA_PLANE_BUFFER_SIZE \ + ((buffersize, MetadataStride, MetadataBufHeight) \ + buffersize = ALIGN(MetadataStride * MetadataBufHeight, \ + BUFFER_ALIGNMENT_4096_BYTES)) +#define HFI_UBWC_UV_METADATA_PLANE_STRIDE \ + ((metadataStride, width, metadataStrideMultiple, tileWidthInPels) \ + metadataStride = ALIGN(((((width + 1) >> 1) + \ + (tileWidthInPels - 1)) / tileWidthInPels), \ + metadataStrideMultiple)) +#define HFI_UBWC_UV_METADATA_PLANE_BUFHEIGHT \ + ((metadataBufHeight, height, metadataHeightMultiple, tileHeightInPels) \ + metadataBufHeight = ALIGN(((((height + 1) >> 1) + \ + (tileHeightInPels - 1)) / tileHeightInPels), \ + metadataHeightMultiple)) + +#define BUFFER_ALIGNMENT_SIZE(x) x + +#define VENUS_DMA_ALIGNMENT BUFFER_ALIGNMENT_SIZE(256) + +#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64 +#define MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE 64 +#define MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE 64 +#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640 +#define MAX_FE_NBR_DATA_CB_LINE_BUFFER_SIZE 320 +#define MAX_FE_NBR_DATA_CR_LINE_BUFFER_SIZE 320 + +#define MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE (128 / 8) +#define MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE (128 / 8) +#define MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE (128 / 8) + +#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE (64 * 2 * 3) +#define MAX_PE_NBR_DATA_LCU32_LINE_BUFFER_SIZE (32 * 2 * 3) +#define MAX_PE_NBR_DATA_LCU16_LINE_BUFFER_SIZE (16 * 2 * 3) + +#define MAX_TILE_COLUMNS 32 /* 8K/256 */ + +#define VPP_CMD_MAX_SIZE (1 << 20) +#define NUM_HW_PIC_BUF 32 +#define BIN_BUFFER_THRESHOLD (1280 * 736) +#define H264D_MAX_SLICE 1800 +#define SIZE_H264D_BUFTAB_T 256 // sizeof(h264d_buftab_t) aligned to 256 +#define SIZE_H264D_HW_PIC_T (1 << 11) // sizeof(h264d_hw_pic_t) 32 aligned 
+#define SIZE_H264D_BSE_CMD_PER_BUF (32 * 4) +#define SIZE_H264D_VPP_CMD_PER_BUF 512 + +// Line Buffer definitions +/* one for luma and 1/2 for each chroma */ +#define SIZE_H264D_LB_FE_TOP_DATA(width, height) \ + (MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * \ + ALIGN(width, 16) * 3) + +#define SIZE_H264D_LB_FE_TOP_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + ((width + 15) >> 4)) + +#define SIZE_H264D_LB_FE_LEFT_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + ((height + 15) >> 4)) + +#define SIZE_H264D_LB_SE_TOP_CTRL(width, height) \ + (MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + ((width + 15) >> 4)) + +#define SIZE_H264D_LB_SE_LEFT_CTRL(width, height) \ + (MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + ((height + 15) >> 4)) + +#define SIZE_H264D_LB_PE_TOP_DATA(width, height) \ + (MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * \ + ((width + 15) >> 4)) + +#define SIZE_H264D_LB_VSP_TOP(width, height) \ + ((((width + 15) >> 4) << 7)) + +#define SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height) \ + (ALIGN(height, 16) * 32) + +#define SIZE_H264D_QP(width, height) \ + (((width + 63) >> 6) * ((height + 63) >> 6) * 128) + +#define SIZE_HW_PIC(sizePerBuf) \ + (NUM_HW_PIC_BUF * sizePerBuf) + +#define H264_CABAC_HDR_RATIO_HD_TOT 1 +#define H264_CABAC_RES_RATIO_HD_TOT 3 + +/* + * some content need more bin buffer, but limit buffer + * size for high resolution + */ + + +#define NUM_SLIST_BUF_H264 (256 + 32) +#define SIZE_SLIST_BUF_H264 512 +#define SIZE_SEI_USERDATA 4096 + +#define LCU_MAX_SIZE_PELS 64 +#define LCU_MIN_SIZE_PELS 16 + +#define H265D_MAX_SLICE 3600 +#define SIZE_H265D_HW_PIC_T SIZE_H264D_HW_PIC_T +#define SIZE_H265D_BSE_CMD_PER_BUF (16 * sizeof(u32)) +#define SIZE_H265D_VPP_CMD_PER_BUF 256 + +#define SIZE_H265D_LB_FE_TOP_DATA(width, height) \ + (MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * \ + (ALIGN(width, 64) + 8) * 2) + +#define SIZE_H265D_LB_FE_TOP_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + 
(ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS)) + +#define SIZE_H265D_LB_FE_LEFT_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS)) + +#define SIZE_H265D_LB_SE_TOP_CTRL(width, height) \ + ((LCU_MAX_SIZE_PELS / 8 * (128 / 8)) * \ + ((width + 15) >> 4)) + +#define SIZE_H265D_LB_SE_LEFT_CTRL(width, height) \ + (max(((height + 16 - 1) / 8) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE,\ + max(((height + 32 - 1) / 8) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE, \ + ((height + 64 - 1) / 8) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE))) + +#define SIZE_H265D_LB_PE_TOP_DATA(width, height) \ + (MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * \ + (ALIGN(width, LCU_MIN_SIZE_PELS) / LCU_MIN_SIZE_PELS)) + +#define SIZE_H265D_LB_VSP_TOP(width, height) \ + (((width + 63) >> 6) * 128) + +#define SIZE_H265D_LB_VSP_LEFT(width, height) \ + (((height + 63) >> 6) * 128) + +#define SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height) \ + SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height) + +#define SIZE_H265D_QP(width, height) SIZE_H264D_QP(width, height) + +#define H265_CABAC_HDR_RATIO_HD_TOT 2 +#define H265_CABAC_RES_RATIO_HD_TOT 2 + +/* + * some content need more bin buffer, but limit buffer size + * for high resolution + */ + +#define SIZE_SLIST_BUF_H265 (1 << 10) +#define NUM_SLIST_BUF_H265 (80 + 20) +#define H265_NUM_TILE_COL 32 +#define H265_NUM_TILE_ROW 128 +#define H265_NUM_TILE (H265_NUM_TILE_ROW * H265_NUM_TILE_COL + 1) + +#define SIZE_VPXD_LB_FE_LEFT_CTRL(width, height) \ + max(((height + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE, \ + max(((height + 31) >> 5) * MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE, \ + ((height + 63) >> 6) * MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE)) +#define SIZE_VPXD_LB_FE_TOP_CTRL(width, height) \ + (((ALIGN(width, 64) + 8) * 10 * 2)) /* + small line */ +#define SIZE_VPXD_LB_SE_TOP_CTRL(width, height) \ + (((width + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE) +#define 
SIZE_VPXD_LB_SE_LEFT_CTRL(width, height) \ + max(((height + 15) >> 4) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE, \ + max(((height + 31) >> 5) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE, \ + ((height + 63) >> 6) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE)) +#define SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height) \ + ALIGN((ALIGN(height, 16) / (4 / 2)) * 64, BUFFER_ALIGNMENT_SIZE(32)) +#define SIZE_VP8D_LB_FE_TOP_DATA(width, height) \ + ((ALIGN(width, 16) + 8) * 10 * 2) +#define SIZE_VP9D_LB_FE_TOP_DATA(width, height) \ + ((ALIGN(ALIGN(width, 16), 64) + 8) * 10 * 2) +#define SIZE_VP8D_LB_PE_TOP_DATA(width, height) \ + ((ALIGN(width, 16) >> 4) * 64) +#define SIZE_VP9D_LB_PE_TOP_DATA(width, height) \ + ((ALIGN(ALIGN(width, 16), 64) >> 6) * 176) +#define SIZE_VP8D_LB_VSP_TOP(width, height) \ + (((ALIGN(width, 16) >> 4) * 64 / 2) + 256) +#define SIZE_VP9D_LB_VSP_TOP(width, height) \ + (((ALIGN(ALIGN(width, 16), 64) >> 6) * 64 * 8) + 256) + + +#define HFI_IRIS2_VP9D_COMV_SIZE \ + ((((8192 + 63) >> 6) * ((4320 + 63) >> 6) * 8 * 8 * 2 * 8)) + +#define VPX_DECODER_FRAME_CONCURENCY_LVL 2 +#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM 1 +#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN 2 +#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM 3 +#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN 2 + +#define VP8_NUM_FRAME_INFO_BUF (5 + 1) +#define VP9_NUM_FRAME_INFO_BUF (32) +#define VP8_NUM_PROBABILITY_TABLE_BUF (VP8_NUM_FRAME_INFO_BUF) +#define VP9_NUM_PROBABILITY_TABLE_BUF (VP9_NUM_FRAME_INFO_BUF + 4) +#define VP8_PROB_TABLE_SIZE 3840 +#define VP9_PROB_TABLE_SIZE 3840 + +#define VP9_UDC_HEADER_BUF_SIZE (3 * 128) +#define MAX_SUPERFRAME_HEADER_LEN (34) +#define CCE_TILE_OFFSET_SIZE ALIGN(32 * 4 * 4, BUFFER_ALIGNMENT_SIZE(32)) + +#define QMATRIX_SIZE (sizeof(u32) * 128 + 256) +#define MP2D_QPDUMP_SIZE 115200 + +#define HFI_IRIS2_ENC_PERSIST_SIZE 204800 + +#define HFI_MAX_COL_FRAME 6 +#define HFI_VENUS_VENC_TRE_WB_BUFF_SIZE (65 << 4) // bytes +#define 
HFI_VENUS_VENC_DB_LINE_BUFF_PER_MB 512 +#define HFI_VENUS_VPPSG_MAX_REGISTERS 2048 +#define HFI_VENUS_WIDTH_ALIGNMENT 128 +#define HFI_VENUS_WIDTH_TEN_BIT_ALIGNMENT 192 +#define HFI_VENUS_HEIGHT_ALIGNMENT 32 + +#define SYSTEM_LAL_TILE10 192 +#define NUM_MBS_360P (((480 + 15) >> 4) * ((360 + 15) >> 4)) +#define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4)) +#define NUM_MBS_4k (((4096 + 15) >> 4) * ((2304 + 15) >> 4)) +#define MB_SIZE_IN_PIXEL (16 * 16) +#define HDR10PLUS_PAYLOAD_SIZE 1024 +#define HDR10_HIST_EXTRADATA_SIZE 4096 + +static int msm_vidc_get_extra_input_buff_count(struct msm_vidc_inst *inst); +static int msm_vidc_get_extra_output_buff_count(struct msm_vidc_inst *inst); + +static inline u32 calculate_h264d_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, bool is_interlaced, u32 delay, + u32 num_vpp_pipes); +static inline u32 calculate_h265d_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, bool is_interlaced, u32 delay, + u32 num_vpp_pipes); +static inline u32 calculate_vpxd_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, bool is_interlaced, u32 delay, + u32 num_vpp_pipes); +static inline u32 calculate_mpeg2d_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, bool is_interlaced, u32 delay, + u32 num_vpp_pipes); + +static inline u32 calculate_enc_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 work_mode, u32 lcu_size, u32 num_vpp_pipes); +static inline u32 calculate_h264e_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 work_mode, u32 num_vpp_pipes); +static inline u32 calculate_h265e_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 work_mode, u32 num_vpp_pipes); +static inline u32 calculate_vp8e_scratch_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 work_mode, u32 num_vpp_pipes); + +static inline u32 calculate_h264d_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 min_buf_count, 
bool split_mode_enabled, + u32 num_vpp_pipes); +static inline u32 calculate_h265d_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled, + u32 num_vpp_pipes); +static inline u32 calculate_vp8d_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled, + u32 num_vpp_pipes); +static inline u32 calculate_vp9d_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled, + u32 num_vpp_pipes); +static inline u32 calculate_mpeg2d_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled, + u32 num_vpp_pipes); + +static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 num_ref, bool ten_bit, u32 num_vpp_pipes); +static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 num_ref, bool ten_bit, u32 num_vpp_pipes); +static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 num_ref, bool ten_bit, u32 num_vpp_pipes); + +static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 num_ref, bool ten_bit, bool downscale, + u32 rotation_val, u32 flip); + +static inline u32 calculate_enc_persist_size(void); + +static inline u32 calculate_h264d_persist1_size(void); +static inline u32 calculate_h265d_persist1_size(void); +static inline u32 calculate_vp8d_persist1_size(void); +static inline u32 calculate_vp9d_persist1_size(void); +static inline u32 calculate_mpeg2d_persist1_size(void); + +static struct msm_vidc_dec_buff_size_calculators h264d_calculators = { + .calculate_scratch_size = calculate_h264d_scratch_size, + .calculate_scratch1_size = calculate_h264d_scratch1_size, + .calculate_persist1_size = calculate_h264d_persist1_size, +}; + +static struct msm_vidc_dec_buff_size_calculators 
h265d_calculators = { + .calculate_scratch_size = calculate_h265d_scratch_size, + .calculate_scratch1_size = calculate_h265d_scratch1_size, + .calculate_persist1_size = calculate_h265d_persist1_size, +}; + +static struct msm_vidc_dec_buff_size_calculators vp8d_calculators = { + .calculate_scratch_size = calculate_vpxd_scratch_size, + .calculate_scratch1_size = calculate_vp8d_scratch1_size, + .calculate_persist1_size = calculate_vp8d_persist1_size, +}; + +static struct msm_vidc_dec_buff_size_calculators vp9d_calculators = { + .calculate_scratch_size = calculate_vpxd_scratch_size, + .calculate_scratch1_size = calculate_vp9d_scratch1_size, + .calculate_persist1_size = calculate_vp9d_persist1_size, +}; + +static struct msm_vidc_dec_buff_size_calculators mpeg2d_calculators = { + .calculate_scratch_size = calculate_mpeg2d_scratch_size, + .calculate_scratch1_size = calculate_mpeg2d_scratch1_size, + .calculate_persist1_size = calculate_mpeg2d_persist1_size, +}; + +static struct msm_vidc_enc_buff_size_calculators h264e_calculators = { + .calculate_scratch_size = calculate_h264e_scratch_size, + .calculate_scratch1_size = calculate_h264e_scratch1_size, + .calculate_scratch2_size = calculate_enc_scratch2_size, + .calculate_persist_size = calculate_enc_persist_size, +}; + +static struct msm_vidc_enc_buff_size_calculators h265e_calculators = { + .calculate_scratch_size = calculate_h265e_scratch_size, + .calculate_scratch1_size = calculate_h265e_scratch1_size, + .calculate_scratch2_size = calculate_enc_scratch2_size, + .calculate_persist_size = calculate_enc_persist_size, +}; + +static struct msm_vidc_enc_buff_size_calculators vp8e_calculators = { + .calculate_scratch_size = calculate_vp8e_scratch_size, + .calculate_scratch1_size = calculate_vp8e_scratch1_size, + .calculate_scratch2_size = calculate_enc_scratch2_size, + .calculate_persist_size = calculate_enc_persist_size, +}; + +int msm_vidc_get_decoder_internal_buffer_sizes(struct msm_vidc_inst *inst) +{ + struct 
msm_vidc_dec_buff_size_calculators *dec_calculators; + u32 width, height, i, out_min_count, num_vpp_pipes; + struct v4l2_format *f; + u32 vpp_delay; + + if (!inst || !inst->core || !inst->core->platform_data) { + d_vpr_e("%s: Instance is null!", __func__); + return -EINVAL; + } + + vpp_delay = inst->bse_vpp_delay; + + num_vpp_pipes = inst->core->platform_data->num_vpp_pipes; + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + switch (f->fmt.pix_mp.pixelformat) { + case V4L2_PIX_FMT_H264: + dec_calculators = &h264d_calculators; + break; + case V4L2_PIX_FMT_HEVC: + dec_calculators = &h265d_calculators; + break; + case V4L2_PIX_FMT_VP8: + dec_calculators = &vp8d_calculators; + break; + case V4L2_PIX_FMT_VP9: + dec_calculators = &vp9d_calculators; + break; + case V4L2_PIX_FMT_MPEG2: + dec_calculators = &mpeg2d_calculators; + break; + default: + s_vpr_e(inst->sid, + "Invalid pix format. Internal buffer cal not defined : %x\n", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + + width = f->fmt.pix_mp.width; + height = f->fmt.pix_mp.height; + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements *curr_req; + bool valid_buffer_type = false; + + curr_req = &inst->buff_req.buffer[i]; + if (curr_req->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH) { + bool is_interlaced = false; + + is_interlaced = (inst->pic_struct == + MSM_VIDC_PIC_STRUCT_MAYBE_INTERLACED); + curr_req->buffer_size = + dec_calculators->calculate_scratch_size( + inst, width, height, is_interlaced, + vpp_delay, num_vpp_pipes); + valid_buffer_type = true; + } else if (curr_req->buffer_type == + HAL_BUFFER_INTERNAL_SCRATCH_1) { + struct msm_vidc_format *fmt = NULL; + + fmt = &inst->fmts[OUTPUT_PORT]; + out_min_count = fmt->count_min; + out_min_count = + max(vpp_delay + 1, out_min_count); + curr_req->buffer_size = + dec_calculators->calculate_scratch1_size( + inst, width, height, out_min_count, + is_secondary_output_mode(inst), + num_vpp_pipes); + valid_buffer_type = true; + } else if 
(curr_req->buffer_type == + HAL_BUFFER_INTERNAL_PERSIST_1) { + curr_req->buffer_size = + dec_calculators->calculate_persist1_size(); + valid_buffer_type = true; + } + + if (valid_buffer_type) { + curr_req->buffer_alignment = 256; + curr_req->buffer_count_actual = + curr_req->buffer_count_min = + curr_req->buffer_count_min_host = 1; + } + } + return 0; +} + +int msm_vidc_get_num_ref_frames(struct msm_vidc_inst *inst) +{ + int num_ref = 1; + int num_bframes = -1, ltr_count = -1; + struct v4l2_ctrl *bframe_ctrl = NULL; + struct v4l2_ctrl *ltr_ctrl = NULL; + struct v4l2_ctrl *frame_t = NULL; + struct v4l2_ctrl *max_layer = NULL; + u32 codec; + + bframe_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES); + num_bframes = bframe_ctrl->val; + if (num_bframes > 0) + num_ref = num_bframes + 1; + + ltr_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT); + ltr_count = ltr_ctrl->val; + /* B and LTR can't be at same time */ + if (ltr_count > 0) + num_ref = num_ref + ltr_count; + + frame_t = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE); + max_layer = get_ctrl(inst, + V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER); + if (frame_t->val == V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P && + max_layer->val > 1) { + codec = get_v4l2_codec(inst); + /* LTR and B - frame not supported with hybrid HP */ + if (inst->hybrid_hp) + num_ref = (max_layer->val + 1) >> 1; + else if (codec == V4L2_PIX_FMT_HEVC) + num_ref = ((max_layer->val + 1) / 2) + ltr_count; + else if ((codec == V4L2_PIX_FMT_H264) && (max_layer->val < 4)) + num_ref = (max_layer->val - 1) + ltr_count; + else + num_ref = max_layer->val + ltr_count; + } + + if (is_hier_b_session(inst)) { + num_ref = (1 << (max_layer->val - 1)) / 2 + 1; + } + + return num_ref; +} + +int msm_vidc_get_encoder_internal_buffer_sizes(struct msm_vidc_inst *inst) +{ + struct msm_vidc_enc_buff_size_calculators *enc_calculators; + u32 width, height, i, num_ref, num_vpp_pipes; + u32 rotation_val = 0, flip = 0; + bool is_tenbit = false, 
is_downscale = false; + int num_bframes; + struct v4l2_ctrl *bframe, *rotation, *hflip, *vflip; + struct v4l2_format *f; + + if (!inst || !inst->core || !inst->core->platform_data) { + d_vpr_e("%s: Instance is null!", __func__); + return -EINVAL; + } + + num_vpp_pipes = inst->core->platform_data->num_vpp_pipes; + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + switch (f->fmt.pix_mp.pixelformat) { + case V4L2_PIX_FMT_H264: + enc_calculators = &h264e_calculators; + break; + case V4L2_PIX_FMT_HEVC: + enc_calculators = &h265e_calculators; + break; + case V4L2_PIX_FMT_VP8: + enc_calculators = &vp8e_calculators; + break; + default: + s_vpr_e(inst->sid, + "Invalid pix format. Internal buffer cal not defined : %x ", + f->fmt.pix_mp.pixelformat); + return -EINVAL; + } + + bframe = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES); + num_bframes = bframe->val; + if (num_bframes < 0) { + s_vpr_e(inst->sid, "%s: get num bframe failed\n", __func__); + return -EINVAL; + } + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + rotation = get_ctrl(inst, V4L2_CID_ROTATE); + rotation_val = rotation->val; + if (rotation_val == 90 || rotation_val == 270) { + /* Internal buffer size calculators are based on rotated w x h */ + width = f->fmt.pix_mp.height; + height = f->fmt.pix_mp.width; + } else { + width = f->fmt.pix_mp.width; + height = f->fmt.pix_mp.height; + } + hflip = get_ctrl(inst, V4L2_CID_HFLIP); + vflip = get_ctrl(inst, V4L2_CID_VFLIP); + flip = hflip->val | vflip->val; + + num_ref = msm_vidc_get_num_ref_frames(inst); + is_tenbit = (inst->bit_depth == MSM_VIDC_BIT_DEPTH_10); + is_downscale = vidc_scalar_enabled(inst); + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements *curr_req; + bool valid_buffer_type = false; + + curr_req = &inst->buff_req.buffer[i]; + if (curr_req->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH) { + curr_req->buffer_size = + enc_calculators->calculate_scratch_size( + inst, width, height, + inst->clk_data.work_mode, + num_vpp_pipes); + valid_buffer_type = 
true; + } else if (curr_req->buffer_type == + HAL_BUFFER_INTERNAL_SCRATCH_1) { + curr_req->buffer_size = + enc_calculators->calculate_scratch1_size( + inst, width, height, num_ref, + is_tenbit, num_vpp_pipes); + valid_buffer_type = true; + } else if (curr_req->buffer_type == + HAL_BUFFER_INTERNAL_SCRATCH_2) { + curr_req->buffer_size = + enc_calculators->calculate_scratch2_size( + inst, width, height, num_ref, + is_tenbit, is_downscale, rotation_val, flip); + valid_buffer_type = true; + } else if (curr_req->buffer_type == + HAL_BUFFER_INTERNAL_PERSIST) { + curr_req->buffer_size = + enc_calculators->calculate_persist_size(); + valid_buffer_type = true; + } + + if (valid_buffer_type) { + curr_req->buffer_alignment = 256; + curr_req->buffer_count_actual = + curr_req->buffer_count_min = + curr_req->buffer_count_min_host = 1; + } + } + return 0; +} + +int msm_vidc_calculate_internal_buffer_sizes(struct msm_vidc_inst *inst) +{ + if (!inst) { + d_vpr_e("%s: Instance is null!", __func__); + return -EINVAL; + } + + if (inst->session_type == MSM_VIDC_DECODER) + return msm_vidc_get_decoder_internal_buffer_sizes(inst); + else if (inst->session_type == MSM_VIDC_ENCODER) + return msm_vidc_get_encoder_internal_buffer_sizes(inst); + + return 0; +} + +void msm_vidc_init_buffer_size_calculators(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core; + uint32_t vpu; + + if (!inst) + return; + + inst->buffer_size_calculators = NULL; + core = inst->core; + vpu = core->platform_data->vpu_ver; + + /* Change this to IRIS2 when ready */ + if (vpu == VPU_VERSION_IRIS2 || vpu == VPU_VERSION_IRIS2_1) + inst->buffer_size_calculators = + msm_vidc_calculate_internal_buffer_sizes; +} + +int msm_vidc_calculate_input_buffer_count(struct msm_vidc_inst *inst) +{ + struct msm_vidc_format *fmt; + int extra_buff_count = 0; + struct v4l2_ctrl *max_layer = NULL; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + fmt = &inst->fmts[INPUT_PORT]; + + if 
(!is_decode_session(inst) && !is_encode_session(inst))
		return 0;

	/* do not change buffer count while session is running */
	if (inst->state == MSM_VIDC_START_DONE)
		return 0;

	/* thumbnail decode processes a single frame: one input is enough */
	if (is_thumbnail_session(inst)) {
		fmt->count_min = fmt->count_min_host = fmt->count_actual =
			SINGLE_INPUT_BUFFER;
		return 0;
	}

	/* HEIC grid session keeps one extra input queued */
	if (is_grid_session(inst)) {
		fmt->count_min = fmt->count_min_host = fmt->count_actual =
			SINGLE_INPUT_BUFFER + 1;
		return 0;
	}

	extra_buff_count = msm_vidc_get_extra_buff_count(inst,
		HAL_BUFFER_INPUT);
	fmt->count_min = MIN_INPUT_BUFFERS;

	/* hier-B needs one input per frame of a GOP pyramid: 2^(layers-1)+2 */
	if (is_hier_b_session(inst)) {
		max_layer = get_ctrl(inst,
			V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
		fmt->count_min = (1 << (max_layer->val - 1)) + 2;
	}

	fmt->count_min_host = fmt->count_actual =
		fmt->count_min + extra_buff_count;

	s_vpr_h(inst->sid, "%s: input min %d min_host %d actual %d\n",
		__func__, fmt->count_min,
		fmt->count_min_host, fmt->count_actual);

	return 0;
}

/*
 * Decide min/min_host/actual OUTPUT buffer counts for a session.
 * The minimum is codec- and session-type specific; extra buffers for
 * DCVS/batching are added via msm_vidc_get_extra_buff_count().
 * Returns 0 on success (also for non-codec sessions), -EINVAL on bad args.
 */
int msm_vidc_calculate_output_buffer_count(struct msm_vidc_inst *inst)
{
	struct msm_vidc_format *fmt;
	int extra_buff_count = 0;
	u32 codec, output_min_count;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	fmt = &inst->fmts[OUTPUT_PORT];
	codec = get_v4l2_codec(inst);

	if (!is_decode_session(inst) && !is_encode_session(inst))
		return 0;

	/* do not change buffer count while session is running */
	if (inst->state == MSM_VIDC_START_DONE)
		return 0;

	/* VP9 thumbnails still need the full reference-picture set */
	if (is_thumbnail_session(inst)) {
		fmt->count_min = (codec == V4L2_PIX_FMT_VP9) ?
			VP9_REFERENCE_COUNT : SINGLE_OUTPUT_BUFFER;
		fmt->count_min_host = fmt->count_actual = fmt->count_min;
		return 0;
	}

	/* Update output buff count: Changes for decoder based on codec */
	if (is_decode_session(inst)) {
		switch (codec) {
		case V4L2_PIX_FMT_MPEG2:
		case V4L2_PIX_FMT_VP8:
			output_min_count = 6;
			break;
		case V4L2_PIX_FMT_VP9:
			output_min_count = 9;
			break;
		default:
			output_min_count = 4; //H264, HEVC
		}
	} else {
		output_min_count = MIN_ENC_OUTPUT_BUFFERS;
	}

	/* honour BSE->VPP pipeline delay: at least that many outputs */
	if (is_vpp_delay_allowed(inst)) {
		output_min_count =
			max(output_min_count, (u32)MAX_BSE_VPP_DELAY);
		output_min_count =
			max(output_min_count, (u32)(msm_vidc_vpp_delay & 0x1F));
	}

	extra_buff_count = msm_vidc_get_extra_buff_count(inst,
		HAL_BUFFER_OUTPUT);
	fmt->count_min = output_min_count;
	fmt->count_min_host = fmt->count_actual =
		fmt->count_min + extra_buff_count;

	s_vpr_h(inst->sid, "%s: output min %d min_host %d actual %d\n",
		__func__, fmt->count_min, fmt->count_min_host,
		fmt->count_actual);

	return 0;
}

/* Recompute both input and output buffer counts for the instance. */
int msm_vidc_calculate_buffer_counts(struct msm_vidc_inst *inst)
{
	int rc;

	rc = msm_vidc_calculate_input_buffer_count(inst);
	if (rc)
		return rc;
	rc = msm_vidc_calculate_output_buffer_count(inst);
	if (rc)
		return rc;

	return rc;
}

/*
 * Extra buffers needed on top of the codec minimum for the given port.
 * Note: returns 0 (not a negative error) on invalid args so callers can
 * add the result to a base count unconditionally.
 */
int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
	enum hal_buffer buffer_type)
{
	if (!inst || !inst->core) {
		d_vpr_e("%s: Invalid args\n", __func__);
		return 0;
	}

	if (!is_decode_session(inst) && !is_encode_session(inst))
		return 0;

	if (buffer_type == HAL_BUFFER_OUTPUT)
		return msm_vidc_get_extra_output_buff_count(inst);
	else if (buffer_type == HAL_BUFFER_INPUT)
		return msm_vidc_get_extra_input_buff_count(inst);

	return 0;
}

/*
 * Extra INPUT buffers: HEIF decode gets a fixed surplus; decoders add
 * buffers for batch mode; encoders add buffers when DCVS is enabled.
 */
static int msm_vidc_get_extra_input_buff_count(struct msm_vidc_inst *inst)
{
	unsigned int extra_input_count = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__,
			inst);
		return -EINVAL;
	}

	core = inst->core;

	if (is_heif_decoder(inst))
		return (HEIF_DEC_TOTAL_INPUT_BUFFERS - MIN_INPUT_BUFFERS);

	/*
	 * For thumbnail session, extra buffers are not required as
	 * neither dcvs nor batching will be enabled.
	 */
	if (is_thumbnail_session(inst))
		return extra_input_count;

	if (is_decode_session(inst)) {
		/* add 2 extra buffers for batching */
		if (inst->batch.enable)
			extra_input_count = (BATCH_DEC_TOTAL_INPUT_BUFFERS -
				MIN_INPUT_BUFFERS);
	} else if (is_encode_session(inst)) {
		/* add 4 extra buffers for dcvs */
		if (core->resources.dcvs)
			extra_input_count = DCVS_ENC_EXTRA_INPUT_BUFFERS;
	}
	return extra_input_count;
}

/*
 * Extra OUTPUT buffers: HEIF decode and image (HEIF) encode get fixed
 * surpluses; realtime decoders add buffers for DCVS or batching.
 */
static int msm_vidc_get_extra_output_buff_count(struct msm_vidc_inst *inst)
{
	unsigned int extra_output_count = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}

	core = inst->core;

	if (is_heif_decoder(inst))
		return HEIF_DEC_EXTRA_OUTPUT_BUFFERS;

	/*
	 * For a non-realtime session, extra buffers are not required.
	 * For thumbnail session, extra buffers are not required as
	 * neither dcvs nor batching will be enabled.
	 */
	if (!is_realtime_session(inst) || is_thumbnail_session(inst))
		return extra_output_count;

	/* For HEIF encoder, we are increasing buffer count */
	if (is_image_session(inst)) {
		extra_output_count = (HEIF_ENC_TOTAL_OUTPUT_BUFFERS -
			MIN_ENC_OUTPUT_BUFFERS);
		return extra_output_count;
	}

	if (is_decode_session(inst)) {
		/* add 4 extra buffers for dcvs */
		if (core->resources.dcvs)
			extra_output_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
		/*
		 * Minimum number of decoder output buffers is codec specific.
		 * If platform supports decode batching ensure minimum 6 extra
		 * output buffers. Else add 4 extra output buffers for DCVS.
		 */
		if (inst->batch.enable)
			extra_output_count = BATCH_DEC_EXTRA_OUTPUT_BUFFERS;
	}
	return extra_output_count;
}

/*
 * Size in bytes of one compressed-bitstream INPUT buffer for decode,
 * derived from the session macroblock count (see in-body comment for
 * the per-resolution/codec rules).  buffer_size_limit, when non-zero,
 * caps the computed size.  Result is 4K aligned.
 */
u32 msm_vidc_calculate_dec_input_frame_size(struct msm_vidc_inst *inst, u32 buffer_size_limit)
{
	u32 frame_size, num_mbs;
	u32 div_factor = 1;
	u32 base_res_mbs = NUM_MBS_4k;
	struct v4l2_format *f;

	/*
	 * Decoder input size calculation:
	 * If clip is 8k buffer size is calculated for 8k : 8k mbs/4
	 * For 8k cases we expect width/height to be set always.
	 * In all other cases size is calculated for 4k:
	 * 4k mbs for VP8/VP9 and 4k/2 for remaining codecs
	 */
	f = &inst->fmts[INPUT_PORT].v4l2_fmt;
	num_mbs = msm_vidc_get_mbs_per_frame(inst);
	if (num_mbs > NUM_MBS_4k) {
		div_factor = 4;
		base_res_mbs = inst->capability.cap[CAP_MBS_PER_FRAME].max;
	} else {
		base_res_mbs = NUM_MBS_4k;
		if (f->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_VP9)
			div_factor = 1;
		else
			div_factor = 2;
	}

	/* secure sessions use half-sized input buffers */
	if (is_secure_session(inst))
		div_factor = div_factor << 1;

	/*
	 * For targets that doesn't support 4k, consider max mb's for that
	 * target and allocate max input buffer size for the same
	 */
	if (inst->core->platform_data->vpu_ver == VPU_VERSION_AR50_LITE) {
		base_res_mbs = inst->capability.cap[CAP_MBS_PER_FRAME].max;
		div_factor = 1;
		if (num_mbs < NUM_MBS_720P)
			base_res_mbs = base_res_mbs * 2;
	}
	/* For HEIF image, use the actual resolution to calc buffer size */
	if (is_heif_decoder(inst)) {
		base_res_mbs = num_mbs;
		div_factor = 1;
	}

	frame_size = base_res_mbs * MB_SIZE_IN_PIXEL * 3 / 2 / div_factor;

	/* multiply by 10/8 (1.25) to get size for 10 bit case */
	if ((f->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_VP9 ||
		f->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_HEVC) &&
		inst->core->platform_data->vpu_ver != VPU_VERSION_AR50_LITE)
		frame_size = frame_size + (frame_size >> 2);

	if (buffer_size_limit && (buffer_size_limit < frame_size)) {
		frame_size = buffer_size_limit;
		s_vpr_h(inst->sid, "input buffer size limited to %d\n",
			frame_size);
	} else {
		s_vpr_h(inst->sid, "set input buffer size to %d\n",
			frame_size);
	}

	return ALIGN(frame_size, SZ_4K);
}

/* Decoder OUTPUT (raw frame) buffer size for the negotiated color format. */
u32 msm_vidc_calculate_dec_output_frame_size(struct msm_vidc_inst *inst)
{
	u32 hfi_fmt;
	struct v4l2_format *f;

	f = &inst->fmts[OUTPUT_PORT].v4l2_fmt;
	hfi_fmt = msm_comm_convert_color_fmt(f->fmt.pix_mp.pixelformat,
		inst->sid);
	return VENUS_BUFFER_SIZE(hfi_fmt, f->fmt.pix_mp.width,
		f->fmt.pix_mp.height);
}

/* Decoder OUTPUT extradata plane size for the current resolution. */
u32 msm_vidc_calculate_dec_output_extra_size(struct msm_vidc_inst *inst)
{
	struct v4l2_format *f;

	f = &inst->fmts[OUTPUT_PORT].v4l2_fmt;
	return VENUS_EXTRADATA_SIZE(f->fmt.pix_mp.width, f->fmt.pix_mp.height);
}

/* Encoder INPUT (raw frame) buffer size for the input color format. */
u32 msm_vidc_calculate_enc_input_frame_size(struct msm_vidc_inst *inst)
{
	u32 hfi_fmt;
	struct v4l2_format *f;

	f = &inst->fmts[INPUT_PORT].v4l2_fmt;
	hfi_fmt = msm_comm_convert_color_fmt(f->fmt.pix_mp.pixelformat,
		inst->sid);
	return VENUS_BUFFER_SIZE(hfi_fmt, f->fmt.pix_mp.width,
		f->fmt.pix_mp.height);
}

/*
 * Encoder OUTPUT (bitstream) buffer size; see in-body comment for the
 * resolution-banded scaling rules.  Result is 4K aligned.
 */
u32 msm_vidc_calculate_enc_output_frame_size(struct msm_vidc_inst *inst)
{
	u32 frame_size;
	u32 mbs_per_frame;
	u32 width, height;
	struct v4l2_format *f;

	f = &inst->fmts[OUTPUT_PORT].v4l2_fmt;
	/*
	 * Encoder output size calculation: 32 Align width/height
	 * For CQ or heic session : YUVsize * 2
	 * For resolution <= 480x360p : YUVsize * 2
	 * For resolution > 360p & <= 4K : YUVsize / 2
	 * For resolution > 4k : YUVsize / 4
	 * Initially frame_size = YUVsize * 2;
	 */

	if (is_grid_session(inst)) {
		f->fmt.pix_mp.width = f->fmt.pix_mp.height = HEIC_GRID_DIMENSION;
	}
	width = ALIGN(f->fmt.pix_mp.width, BUFFER_ALIGNMENT_SIZE(32));
	height = ALIGN(f->fmt.pix_mp.height, BUFFER_ALIGNMENT_SIZE(32));
	mbs_per_frame = NUM_MBS_PER_FRAME(width, height);
	frame_size = (width * height * 3);

	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ ||
		is_grid_session(inst) || is_image_session(inst))
		goto calc_done;

	if (mbs_per_frame
<= NUM_MBS_360P)
		(void)frame_size; /* Default frame_size = YUVsize * 2 */
	else if (mbs_per_frame <= NUM_MBS_4k)
		frame_size = frame_size >> 2;
	else
		frame_size = frame_size >> 3;

	/* rate control off: double the budget, nothing caps the bitstream */
	if (inst->rc_type == RATE_CONTROL_OFF)
		frame_size = frame_size << 1;

	if (inst->rc_type == RATE_CONTROL_LOSSLESS)
		frame_size = (width * height * 9) >> 2;

	/* multiply by 10/8 (1.25) to get size for 10 bit case */
	if (inst->core->platform_data->vpu_ver != VPU_VERSION_AR50_LITE &&
		f->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_HEVC) {
		frame_size = frame_size + (frame_size >> 2);
	}

calc_done:
	return ALIGN(frame_size, SZ_4K);
}

/*
 * Bytes of ROI extradata: 2 bytes per LCU, rows padded to 8 LCUs.
 * lcu_size must be a power of two (callers pass 16 or 32).
 */
static inline u32 ROI_EXTRADATA_SIZE(
	u32 width, u32 height, u32 lcu_size) {
	u32 lcu_width = 0;
	u32 lcu_height = 0;
	u32 n_shift = 0;

	/* derive n_shift = log2(lcu_size) by counting trailing zero bits */
	while (lcu_size && !(lcu_size & 0x1)) {
		n_shift++;
		lcu_size = lcu_size >> 1;
	}
	lcu_width = (width + (lcu_size - 1)) >> n_shift;
	lcu_height = (height + (lcu_size - 1)) >> n_shift;

	return (((lcu_width + 7) >> 3) << 3) * lcu_height * 2;
}

/*
 * Total encoder INPUT extradata size: CVP metadata always, plus ROI
 * and HDR10+ payloads when enabled, plus one header per payload and a
 * trailing EXTRADATA_NONE header.  Result is 4K aligned.
 */
u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst)
{
	u32 size = 0;
	u32 extradata_count = 0;
	struct v4l2_format *f;

	f = &inst->fmts[OUTPUT_PORT].v4l2_fmt;
	/* Add size for default extradata */
	size += sizeof(struct msm_vidc_enc_cvp_metadata_payload);
	extradata_count++;

	if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_ROI) {
		u32 lcu_size = 16;

		if (f->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_HEVC)
			lcu_size = 32;

		f = &inst->fmts[INPUT_PORT].v4l2_fmt;
		size += ROI_EXTRADATA_SIZE(f->fmt.pix_mp.width,
			f->fmt.pix_mp.height, lcu_size);
		extradata_count++;
	}

	if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_HDR10PLUS) {
		size += HDR10PLUS_PAYLOAD_SIZE;
		extradata_count++;
	}

	/* Add extradata header sizes including EXTRADATA_NONE */
	if (size)
		size += sizeof(struct msm_vidc_extradata_header) *
			(extradata_count + 1);

	return ALIGN(size, SZ_4K);
}

/*
 * Total encoder OUTPUT extradata size (LTR, frame QP when enabled),
 * plus a trailing EXTRADATA_NONE header.  Result is 4K aligned.
 */
u32 msm_vidc_calculate_enc_output_extra_size(struct msm_vidc_inst *inst)
{
	u32 size = 0;

	if (inst->prop.extradata_ctrls & EXTRADATA_ADVANCED)
		size += sizeof(struct msm_vidc_metadata_ltr_payload);

	if (inst->prop.extradata_ctrls & EXTRADATA_ENC_FRAME_QP)
		size += sizeof(struct msm_vidc_frame_qp_payload);

	/* Add size for extradata none */
	if (size)
		size += sizeof(struct msm_vidc_extradata_header);

	return ALIGN(size, SZ_4K);
}

/*
 * VPSS line-buffer size used when the decoder runs in split (DPB+OPB)
 * mode.  The 4tap/div2 terms are currently zero; the total is dominated
 * by the OPB write top-line and left-line-buffer terms.
 */
static inline u32 size_vpss_lb(u32 width, u32 height, u32 num_vpp_pipes)
{
	u32 vpss_4tap_top_buffer_size, vpss_div2_top_buffer_size;
	u32 vpss_4tap_left_buffer_size, vpss_div2_left_buffer_size;
	u32 opb_wr_top_line_luma_buf_size, opb_wr_top_line_chroma_buf_size;
	u32 opb_lb_wr_llb_y_buffer_size, opb_lb_wr_llb_uv_buffer_size;
	u32 macrotiling_size;
	u32 size = 0;

	vpss_4tap_top_buffer_size = vpss_div2_top_buffer_size =
		vpss_4tap_left_buffer_size = vpss_div2_left_buffer_size = 0;
	macrotiling_size = 32;
	opb_wr_top_line_luma_buf_size = ALIGN(width, macrotiling_size) /
		macrotiling_size * 256;
	opb_wr_top_line_luma_buf_size = ALIGN(opb_wr_top_line_luma_buf_size,
		VENUS_DMA_ALIGNMENT) + (MAX_TILE_COLUMNS - 1) * 256;
	opb_wr_top_line_luma_buf_size = max(opb_wr_top_line_luma_buf_size,
		(32 * ALIGN(height, 16)));
	opb_wr_top_line_chroma_buf_size = opb_wr_top_line_luma_buf_size;
	opb_lb_wr_llb_uv_buffer_size = opb_lb_wr_llb_y_buffer_size =
		ALIGN((ALIGN(height, 16) / 2) *
			64, BUFFER_ALIGNMENT_SIZE(32));
	size = num_vpp_pipes * 2 * (vpss_4tap_top_buffer_size +
		vpss_div2_top_buffer_size) +
		2 * (vpss_4tap_left_buffer_size +
		vpss_div2_left_buffer_size) +
		opb_wr_top_line_luma_buf_size +
		opb_wr_top_line_chroma_buf_size +
		opb_lb_wr_llb_uv_buffer_size +
		opb_lb_wr_llb_y_buffer_size;

	return size;
}

/*
 * H264 decoder co-located MV buffer: per-DPB-buffer colloc + col-zero
 * areas, sized per macroblock row pair and 512-byte aligned.
 */
static inline u32 hfi_iris2_h264d_comv_size(u32 width, u32 height,
	u32 yuv_buf_min_count)
{
	u32 comv_size = 0;
	u32 frame_width_in_mbs = ((width + 15) >> 4);
	u32 frame_height_in_mbs =
((height + 15) >> 4);
	u32 col_mv_aligned_width = (frame_width_in_mbs << 7);
	u32 col_zero_aligned_width = (frame_width_in_mbs << 2);
	u32 col_zero_size = 0, size_colloc = 0;

	col_mv_aligned_width = ALIGN(col_mv_aligned_width,
		BUFFER_ALIGNMENT_SIZE(16));
	col_zero_aligned_width = ALIGN(col_zero_aligned_width,
		BUFFER_ALIGNMENT_SIZE(16));
	col_zero_size = col_zero_aligned_width *
		((frame_height_in_mbs + 1) >> 1);
	col_zero_size = ALIGN(col_zero_size, BUFFER_ALIGNMENT_SIZE(64));
	col_zero_size <<= 1;
	col_zero_size = ALIGN(col_zero_size, BUFFER_ALIGNMENT_SIZE(512));
	size_colloc = col_mv_aligned_width * ((frame_height_in_mbs + 1) >> 1);
	size_colloc = ALIGN(size_colloc, BUFFER_ALIGNMENT_SIZE(64));
	size_colloc <<= 1;
	size_colloc = ALIGN(size_colloc, BUFFER_ALIGNMENT_SIZE(512));
	size_colloc += (col_zero_size + SIZE_H264D_BUFTAB_T * 2);
	comv_size = size_colloc * yuv_buf_min_count;
	comv_size += BUFFER_ALIGNMENT_SIZE(512);

	return comv_size;
}

/* H264 BSE command buffer; entry count capped at H264D_MAX_SLICE. */
static inline u32 size_h264d_bse_cmd_buf(u32 height)
{
	u32 aligned_height = ALIGN(height, BUFFER_ALIGNMENT_SIZE(32));

	return min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4),
		H264D_MAX_SLICE) *
		SIZE_H264D_BSE_CMD_PER_BUF;
}

/* H264 VPP command buffer; final size capped at VPP_CMD_MAX_SIZE. */
static inline u32 size_h264d_vpp_cmd_buf(u32 height)
{
	u32 size = 0;
	u32 aligned_height = ALIGN(height, BUFFER_ALIGNMENT_SIZE(32));

	size = min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4),
		H264D_MAX_SLICE) *
		SIZE_H264D_VPP_CMD_PER_BUF;
	if (size > VPP_CMD_MAX_SIZE)
		size = VPP_CMD_MAX_SIZE;
	return size;
}

/*
 * Sum of all non-co-located H264 decoder internal buffers (BSE/VPP
 * command, HW pic, FE/SE/PE line buffers, VSP, recon metadata, QP map).
 * Per-pipe (LEFT_CTRL) terms scale with num_vpp_pipes.
 */
static inline u32 hfi_iris2_h264d_non_comv_size(u32 width, u32 height,
	u32 num_vpp_pipes)
{
	u32 size;
	u32 size_bse, size_vpp;

	size_bse = size_h264d_bse_cmd_buf(height);
	size_vpp = size_h264d_vpp_cmd_buf(height);
	size = ALIGN(size_bse, VENUS_DMA_ALIGNMENT) +
		ALIGN(size_vpp, VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_HW_PIC(SIZE_H264D_HW_PIC_T), VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H264D_LB_FE_TOP_DATA(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H264D_LB_FE_TOP_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H264D_LB_FE_LEFT_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_H264D_LB_SE_TOP_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H264D_LB_SE_LEFT_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_H264D_LB_PE_TOP_DATA(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H264D_LB_VSP_TOP(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height),
			VENUS_DMA_ALIGNMENT) * 2 +
		ALIGN(SIZE_H264D_QP(width, height), VENUS_DMA_ALIGNMENT);
	size = ALIGN(size, VENUS_DMA_ALIGNMENT);
	return size;
}

/*
 * H264 CABAC bin buffers (header + residual), scaled by the 5-bit VPP
 * delay and rounded up per pipe so each pipe gets an aligned share.
 */
static inline u32 size_h264d_hw_bin_buffer(u32 width, u32 height, u32 delay,
	u32 num_vpp_pipes)
{
	u32 size_yuv, size_bin_hdr, size_bin_res;
	u32 size = 0;
	u32 product;

	product = width * height;
	size_yuv = (product <= BIN_BUFFER_THRESHOLD) ?
		((BIN_BUFFER_THRESHOLD * 3) >> 1) :
		((product * 3) >> 1);

	size_bin_hdr = size_yuv * H264_CABAC_HDR_RATIO_HD_TOT;
	size_bin_res = size_yuv * H264_CABAC_RES_RATIO_HD_TOT;
	size_bin_hdr = size_bin_hdr * (((((u32)(delay)) & 31) / 10) + 2) / 2;
	size_bin_res = size_bin_res * (((((u32)(delay)) & 31) / 10) + 2) / 2;
	size_bin_hdr = ALIGN(size_bin_hdr / num_vpp_pipes,
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes;
	size_bin_res = ALIGN(size_bin_res / num_vpp_pipes,
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes;
	size = size_bin_hdr + size_bin_res;
	return size;
}

/* H264 decode scratch (bin) buffer; interlaced content uses none. */
static inline u32 calculate_h264d_scratch_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, bool is_interlaced, u32 delay, u32 num_vpp_pipes)
{
	u32 aligned_width = ALIGN(width, BUFFER_ALIGNMENT_SIZE(16));
	u32 aligned_height = ALIGN(height, BUFFER_ALIGNMENT_SIZE(16));
	u32 size = 0;

	if (!is_interlaced)
		size = size_h264d_hw_bin_buffer(aligned_width, aligned_height,
			delay, num_vpp_pipes);
	else
		size = 0;

	return size;
}

static
/* H265 BSE command buffer; LCU-based count capped at H265D_MAX_SLICE+1. */
inline u32 size_h265d_bse_cmd_buf(u32 width, u32 height)
{
	u32 size;

	size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
		(ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
		NUM_HW_PIC_BUF;
	size = min_t(u32, size, H265D_MAX_SLICE + 1);
	size = 2 * size * SIZE_H265D_BSE_CMD_PER_BUF;
	size = ALIGN(size, VENUS_DMA_ALIGNMENT);

	return size;
}

/* H265 VPP command buffer; final size capped at VPP_CMD_MAX_SIZE. */
static inline u32 size_h265d_vpp_cmd_buf(u32 width, u32 height)
{
	u32 size = 0;

	size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
		(ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
		NUM_HW_PIC_BUF;
	size = min_t(u32, size, H265D_MAX_SLICE + 1);
	size = ALIGN(size, 4);
	size = 2 * size * SIZE_H265D_VPP_CMD_PER_BUF;
	size = ALIGN(size, VENUS_DMA_ALIGNMENT);
	if (size > VPP_CMD_MAX_SIZE)
		size = VPP_CMD_MAX_SIZE;
	return size;
}

/* H265 co-located MV buffer: 256B per 16x16 block, per DPB buffer. */
static inline u32 hfi_iris2_h265d_comv_size(u32 width, u32 height,
	u32 yuv_buf_count_min)
{
	u32 size = 0;

	size = ALIGN(((((width + 15) >> 4) * ((height + 15) >> 4)) << 8),
		BUFFER_ALIGNMENT_SIZE(512));
	size *= yuv_buf_count_min;
	size += BUFFER_ALIGNMENT_SIZE(512);

	return size;
}

/*
 * Sum of all non-co-located H265 decoder internal buffers; per-pipe
 * (LEFT_CTRL / VSP_LEFT) terms scale with num_vpp_pipes.
 */
static inline u32 hfi_iris2_h265d_non_comv_size(u32 width, u32 height,
	u32 num_vpp_pipes)
{
	u32 size_bse, size_vpp;
	u32 size = 0;

	size_bse = size_h265d_bse_cmd_buf(width, height);
	size_vpp = size_h265d_vpp_cmd_buf(width, height);
	size = ALIGN(size_bse, VENUS_DMA_ALIGNMENT) +
		ALIGN(size_vpp, VENUS_DMA_ALIGNMENT) +
		ALIGN(NUM_HW_PIC_BUF * 20 * 22 * 4, VENUS_DMA_ALIGNMENT) +
		ALIGN(2 * sizeof(u16) *
			(ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
			(ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_HW_PIC(SIZE_H265D_HW_PIC_T), VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H265D_LB_FE_TOP_DATA(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H265D_LB_FE_TOP_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H265D_LB_FE_LEFT_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_H265D_LB_SE_LEFT_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_H265D_LB_SE_TOP_CTRL(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H265D_LB_PE_TOP_DATA(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H265D_LB_VSP_TOP(width, height),
			VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_H265D_LB_VSP_LEFT(width, height),
			VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height),
			VENUS_DMA_ALIGNMENT) * 4 +
		ALIGN(SIZE_H265D_QP(width, height), VENUS_DMA_ALIGNMENT);
	size = ALIGN(size, VENUS_DMA_ALIGNMENT);
	return size;
}

/*
 * H265 CABAC bin buffers (header + residual), scaled by the 5-bit VPP
 * delay and rounded up per pipe — same scheme as the H264 variant.
 */
static inline u32 size_h265d_hw_bin_buffer(u32 width, u32 height, u32 delay,
	u32 num_vpp_pipes)
{
	u32 size = 0;
	u32 size_yuv, size_bin_hdr, size_bin_res;
	u32 product;

	product = width * height;
	size_yuv = (product <= BIN_BUFFER_THRESHOLD) ?
		((BIN_BUFFER_THRESHOLD * 3) >> 1) :
		((product * 3) >> 1);
	size_bin_hdr = size_yuv * H265_CABAC_HDR_RATIO_HD_TOT;
	size_bin_res = size_yuv * H265_CABAC_RES_RATIO_HD_TOT;
	size_bin_hdr = size_bin_hdr * (((((u32)(delay)) & 31) / 10) + 2) / 2;
	size_bin_res = size_bin_res * (((((u32)(delay)) & 31) / 10) + 2) / 2;
	size_bin_hdr = ALIGN(size_bin_hdr / num_vpp_pipes,
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes;
	size_bin_res = ALIGN(size_bin_res / num_vpp_pipes,
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes;
	size = size_bin_hdr + size_bin_res;

	return size;
}

/* H265 decode scratch (bin) buffer; interlaced content uses none. */
static inline u32 calculate_h265d_scratch_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, bool is_interlaced, u32 delay, u32 num_vpp_pipes)
{
	u32 aligned_width = ALIGN(width, BUFFER_ALIGNMENT_SIZE(16));
	u32 aligned_height = ALIGN(height, BUFFER_ALIGNMENT_SIZE(16));
	u32 size = 0;

	if (!is_interlaced)
		size = size_h265d_hw_bin_buffer(aligned_width, aligned_height,
			delay, num_vpp_pipes);
	else
		size = 0;

	return size;
}

/* VP8/VP9 decode scratch (bin) buffer; interlaced content uses none. */
static inline u32 calculate_vpxd_scratch_size(struct msm_vidc_inst *inst,
	u32 width, u32
height, bool is_interlaced, u32 delay, u32 num_vpp_pipes)
{
	u32 aligned_width = ALIGN(width, BUFFER_ALIGNMENT_SIZE(16));
	u32 aligned_height = ALIGN(height, BUFFER_ALIGNMENT_SIZE(16));
	u32 size = 0;
	u32 size_yuv = aligned_width * aligned_height * 3 / 2;

	if (!is_interlaced) {
		/* binbuffer1_size + binbufer2_size */
		u32 binbuffer1_size = 0, binbuffer2_size = 0;

		binbuffer1_size = ALIGN(max_t(u32, size_yuv,
			((BIN_BUFFER_THRESHOLD * 3) >> 1)) *
			VPX_DECODER_FRAME_CONCURENCY_LVL *
			VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM /
			VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN /
			num_vpp_pipes,
			VENUS_DMA_ALIGNMENT);
		binbuffer2_size = ALIGN(max_t(u32, size_yuv,
			((BIN_BUFFER_THRESHOLD * 3) >> 1)) *
			VPX_DECODER_FRAME_CONCURENCY_LVL *
			VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM /
			VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN /
			num_vpp_pipes,
			VENUS_DMA_ALIGNMENT);
		size = binbuffer1_size + binbuffer2_size;
		size = size * num_vpp_pipes;
	} else {
		size = 0;
	}

	return size;
}

/* MPEG2 decode needs no scratch buffer. */
static inline u32 calculate_mpeg2d_scratch_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, bool is_interlaced, u32 delay, u32 num_vpp_pipes)
{
	u32 aligned_width = ALIGN(width, BUFFER_ALIGNMENT_SIZE(16));
	u32 aligned_height = ALIGN(height, BUFFER_ALIGNMENT_SIZE(16));
	u32 size = 0;
	u32 size_yuv = aligned_width * aligned_height * 3 / 2;

	if (!is_interlaced) {
		/* binbuffer1_size + binbufer2_size */
		u32 binbuffer1_size = 0, binbuffer2_size = 0;

		binbuffer1_size = ALIGN(max_t(u32, size_yuv,
			((BIN_BUFFER_THRESHOLD * 3) >> 1)) *
			VPX_DECODER_FRAME_CONCURENCY_LVL *
			VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM /
			VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN /
			num_vpp_pipes,
			VENUS_DMA_ALIGNMENT);
		binbuffer2_size = ALIGN(max_t(u32, size_yuv,
			((BIN_BUFFER_THRESHOLD * 3) >> 1)) *
			VPX_DECODER_FRAME_CONCURENCY_LVL *
			VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM /
			VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN /
			num_vpp_pipes,
			VENUS_DMA_ALIGNMENT);
		size = binbuffer1_size + binbuffer2_size;
		size = size * num_vpp_pipes;
	} else {
		size = 0;
	}

	return size;
}
(split_mode_enabled)
		vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
	size = co_mv_size + nonco_mv_size + vpss_lb_size;
	return size;
}

/* H265 decoder scratch1: comv + non-comv + VPSS LB + HDR10 histogram. */
static inline u32 calculate_h265d_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled,
	u32 num_vpp_pipes)
{
	u32 co_mv_size = 0, nonco_mv_size = 0;
	u32 vpss_lb_size = 0;
	u32 size = 0;

	co_mv_size = hfi_iris2_h265d_comv_size(width, height, min_buf_count);
	nonco_mv_size =
		hfi_iris2_h265d_non_comv_size(width, height, num_vpp_pipes);
	if (split_mode_enabled)
		vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);

	size = co_mv_size + nonco_mv_size + vpss_lb_size +
		HDR10_HIST_EXTRADATA_SIZE;
	return size;
}

/* VP8 co-located MV: 16 bytes per macroblock (count arg is unused). */
static inline u32 hfi_iris2_vp8d_comv_size(u32 width, u32 height,
	u32 yuv_min_buf_count)
{
	return (((width + 15) >> 4) * ((height + 15) >> 4) * 8 * 2);
}

/* VP8 decoder scratch1: comv + FE/SE/PE/VSP line buffers + VPSS LB. */
static inline u32 calculate_vp8d_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled,
	u32 num_vpp_pipes)
{
	u32 vpss_lb_size = 0;
	u32 size = 0;

	size = hfi_iris2_vp8d_comv_size(width, height, 0);
	size += ALIGN(SIZE_VPXD_LB_FE_LEFT_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_VPXD_LB_SE_LEFT_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) +
		2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height),
		VENUS_DMA_ALIGNMENT);
	if (split_mode_enabled)
		vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);

	size += vpss_lb_size;
	return size;
}

/* VP9 decoder scratch1: line buffers + VPSS LB + HDR10 histogram. */
static inline u32 calculate_vp9d_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled,
	u32 num_vpp_pipes)
{
	u32 vpss_lb_size = 0;
	u32 size = 0;

	size = ALIGN(SIZE_VPXD_LB_FE_LEFT_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_VPXD_LB_SE_LEFT_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_VP9D_LB_VSP_TOP(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) +
		2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VP9D_LB_PE_TOP_DATA(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VP9D_LB_FE_TOP_DATA(width, height),
		VENUS_DMA_ALIGNMENT);
	if (split_mode_enabled)
		vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);

	size += vpss_lb_size + HDR10_HIST_EXTRADATA_SIZE;
	return size;
}

/* MPEG2 decoder scratch1: VP8-style line buffers + optional VPSS LB. */
static inline u32 calculate_mpeg2d_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled,
	u32 num_vpp_pipes)
{
	u32 vpss_lb_size = 0;
	u32 size = 0;

	size = ALIGN(SIZE_VPXD_LB_FE_LEFT_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_VPXD_LB_SE_LEFT_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) * num_vpp_pipes +
		ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) +
		2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height),
		VENUS_DMA_ALIGNMENT) +
		ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height),
		VENUS_DMA_ALIGNMENT);
	if (split_mode_enabled)
		vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);

	size += vpss_lb_size;
	return size;
}

static inline
/*
 * Encoder scratch1: sum of every internal line/ctrl/rate-control buffer
 * the encoder HW needs for one session.  Shared by H264 (lcu 16),
 * H265 (lcu 32) and VP8 via the wrappers below; is_h265 selects the
 * codec-specific formulas.  All terms are VENUS_DMA_ALIGNMENT padded.
 */
u32 calculate_enc_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 lcu_size, u32 num_ref, bool ten_bit,
	u32 num_vpp_pipes, bool is_h265)
{
	u32 line_buf_ctrl_size, line_buf_data_size, leftline_buf_ctrl_size;
	u32 line_buf_sde_size, sps_pps_slice_hdr, topline_buf_ctrl_size_FE;
	u32 leftline_buf_ctrl_size_FE, line_buf_recon_pix_size;
	u32 leftline_buf_recon_pix_size, lambda_lut_size, override_buffer_size;
	u32 col_mv_buf_size, vpp_reg_buffer_size, ir_buffer_size;
	u32 vpss_line_buf, leftline_buf_meta_recony, col_rc_buf_size;
	u32 h265e_framerc_bufsize, h265e_lcubitcnt_bufsize;
	u32 h265e_lcubitmap_bufsize, se_stats_bufsize;
	u32 bse_reg_buffer_size, bse_slice_cmd_buffer_size, slice_info_bufsize;
	u32 line_buf_ctrl_size_buffid2, slice_cmd_buffer_size;
	u32 width_lcu_num, height_lcu_num, width_coded, height_coded;
	u32 frame_num_lcu, linebuf_meta_recon_uv, topline_bufsize_fe_1stg_sao;
	u32 output_mv_bufsize = 0, temp_scratch_mv_bufsize = 0;
	u32 size, bit_depth, num_LCUMB;
	u32 vpss_lineBufferSize_1 = 0;
	u32 width_mb_num = ((width + 15) >> 4);
	u32 height_mb_num = ((height + 15) >> 4);

	/* coded dimensions are rounded up to whole LCUs */
	width_lcu_num = ((width)+(lcu_size)-1) / (lcu_size);
	height_lcu_num = ((height)+(lcu_size)-1) / (lcu_size);
	frame_num_lcu = width_lcu_num * height_lcu_num;
	width_coded = width_lcu_num * (lcu_size);
	height_coded = height_lcu_num * (lcu_size);
	num_LCUMB = (height_coded / lcu_size) * ((width_coded + lcu_size * 8) / lcu_size);
	slice_info_bufsize = (256 + (frame_num_lcu << 4));
	slice_info_bufsize = ALIGN(slice_info_bufsize, VENUS_DMA_ALIGNMENT);
	line_buf_ctrl_size = ALIGN(width_coded, VENUS_DMA_ALIGNMENT);
	line_buf_ctrl_size_buffid2 = ALIGN(width_coded, VENUS_DMA_ALIGNMENT);

	bit_depth = ten_bit ? 10 : 8;
	line_buf_data_size = (((((bit_depth * width_coded + 1024) +
		(VENUS_DMA_ALIGNMENT - 1)) & (~(VENUS_DMA_ALIGNMENT - 1))) * 1) +
		(((((bit_depth * width_coded + 1024) >> 1) +
		(VENUS_DMA_ALIGNMENT - 1)) &
		(~(VENUS_DMA_ALIGNMENT - 1))) * 2));
	leftline_buf_ctrl_size = (is_h265) ?
		((height_coded + (BUFFER_ALIGNMENT_SIZE(32))) /
		BUFFER_ALIGNMENT_SIZE(32) * 4 * 16) :
		((height_coded + 15) / 16 * 5 * 16);
	if (num_vpp_pipes > 1) {
		leftline_buf_ctrl_size += BUFFER_ALIGNMENT_SIZE(512);
		leftline_buf_ctrl_size = ALIGN(leftline_buf_ctrl_size,
			BUFFER_ALIGNMENT_SIZE(512)) * num_vpp_pipes;
	}
	leftline_buf_ctrl_size = ALIGN(leftline_buf_ctrl_size,
		VENUS_DMA_ALIGNMENT);
	leftline_buf_recon_pix_size = (((ten_bit + 1) * 2 *
		(height_coded)+VENUS_DMA_ALIGNMENT) +
		(VENUS_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) &
		(~((VENUS_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1;
	topline_buf_ctrl_size_FE = (is_h265) ? (64 * (width_coded >> 5)) :
		(VENUS_DMA_ALIGNMENT + 16 * (width_coded >> 4));
	topline_buf_ctrl_size_FE = ALIGN(topline_buf_ctrl_size_FE,
		VENUS_DMA_ALIGNMENT);
	leftline_buf_ctrl_size_FE = (((VENUS_DMA_ALIGNMENT + 64 *
		(height_coded >> 4)) +
		(VENUS_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) &
		(~((VENUS_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1) *
		num_vpp_pipes;
	leftline_buf_meta_recony = (VENUS_DMA_ALIGNMENT + 64 *
		((height_coded) / (8 * (ten_bit ? 4 : 8))));
	leftline_buf_meta_recony = ALIGN(leftline_buf_meta_recony,
		VENUS_DMA_ALIGNMENT);
	leftline_buf_meta_recony = leftline_buf_meta_recony *
		num_vpp_pipes;
	linebuf_meta_recon_uv = (VENUS_DMA_ALIGNMENT + 64 *
		((height_coded) / (4 * (ten_bit ? 4 : 8))));
	linebuf_meta_recon_uv = ALIGN(linebuf_meta_recon_uv,
		VENUS_DMA_ALIGNMENT);
	linebuf_meta_recon_uv = linebuf_meta_recon_uv *
		num_vpp_pipes;
	line_buf_recon_pix_size = ((ten_bit ? 3 : 2) * width_coded);
	line_buf_recon_pix_size = ALIGN(line_buf_recon_pix_size,
		VENUS_DMA_ALIGNMENT);
	slice_cmd_buffer_size = ALIGN(20480, VENUS_DMA_ALIGNMENT);
	sps_pps_slice_hdr = 2048 + 4096;
	col_mv_buf_size = (is_h265) ? (16 * ((frame_num_lcu << 2) +
		BUFFER_ALIGNMENT_SIZE(32))) :
		(3 * 16 * (width_lcu_num * height_lcu_num +
		BUFFER_ALIGNMENT_SIZE(32)));
	col_mv_buf_size = ALIGN(col_mv_buf_size, VENUS_DMA_ALIGNMENT)
		* (num_ref + 1);
	col_rc_buf_size = (((width_mb_num + 7) >> 3) *
		16 * 2 * height_mb_num);
	col_rc_buf_size = ALIGN(col_rc_buf_size,
		VENUS_DMA_ALIGNMENT) * HFI_MAX_COL_FRAME;
	h265e_framerc_bufsize = (is_h265) ? (256 + 16 *
		(14 + (((height_coded >> 5) + 7) >> 3))) :
		(256 + 16 * (14 + (((height_coded >> 4) + 7) >> 3)));
	h265e_framerc_bufsize *= 6; /* multiply by max numtilescol*/
	if (num_vpp_pipes > 1)
		h265e_framerc_bufsize = ALIGN(h265e_framerc_bufsize,
			VENUS_DMA_ALIGNMENT) * num_vpp_pipes;

	h265e_framerc_bufsize = ALIGN(h265e_framerc_bufsize,
		BUFFER_ALIGNMENT_SIZE(512)) * HFI_MAX_COL_FRAME;
	h265e_lcubitcnt_bufsize = (256 + 4 * frame_num_lcu);
	h265e_lcubitcnt_bufsize = ALIGN(h265e_lcubitcnt_bufsize,
		VENUS_DMA_ALIGNMENT);
	h265e_lcubitmap_bufsize = 256 + (frame_num_lcu >> 3);
	h265e_lcubitmap_bufsize = ALIGN(h265e_lcubitmap_bufsize,
		VENUS_DMA_ALIGNMENT);
	line_buf_sde_size = 256 + 16 * (width_coded >> 4);
	line_buf_sde_size = ALIGN(line_buf_sde_size, VENUS_DMA_ALIGNMENT);
	/* SE stats shrink as resolution grows; none above 4K */
	if ((width_coded * height_coded) > (4096 * 2160))
		se_stats_bufsize = 0;
	else if ((width_coded * height_coded) > (1920 * 1088))
		se_stats_bufsize = (40 * 4 * frame_num_lcu + 256 + 256);
	else
		se_stats_bufsize = (1024 * frame_num_lcu + 256 + 256);

	se_stats_bufsize = ALIGN(se_stats_bufsize, VENUS_DMA_ALIGNMENT) * 2;
	bse_slice_cmd_buffer_size = ((((8192 << 2) + 7) & (~7)) * 6);
	bse_reg_buffer_size = ((((512 << 3) + 7) & (~7)) * 4);
	vpp_reg_buffer_size = ((((HFI_VENUS_VPPSG_MAX_REGISTERS << 3) + 31) &
		(~31)) * 10);
	lambda_lut_size = (256 * 11);
	override_buffer_size = 16 * ((num_LCUMB + 7) >> 3);
	override_buffer_size = ALIGN(override_buffer_size,
		VENUS_DMA_ALIGNMENT) * 2;
	ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
	vpss_lineBufferSize_1 = ((((8192) >> 2) << 5) * num_vpp_pipes) + 64;
	vpss_line_buf = (((((max(width_coded, height_coded) + 3) >> 2) << 5) +
		256) * 16) + vpss_lineBufferSize_1;
	topline_bufsize_fe_1stg_sao = (16 * (width_coded >> 5));
	topline_bufsize_fe_1stg_sao = ALIGN(topline_bufsize_fe_1stg_sao,
		VENUS_DMA_ALIGNMENT);
	size = line_buf_ctrl_size + line_buf_data_size +
		line_buf_ctrl_size_buffid2 + leftline_buf_ctrl_size +
		vpss_line_buf + col_mv_buf_size + topline_buf_ctrl_size_FE +
		leftline_buf_ctrl_size_FE + line_buf_recon_pix_size +
		leftline_buf_recon_pix_size + leftline_buf_meta_recony +
		linebuf_meta_recon_uv + col_rc_buf_size +
		h265e_framerc_bufsize + h265e_lcubitcnt_bufsize +
		h265e_lcubitmap_bufsize + line_buf_sde_size +
		topline_bufsize_fe_1stg_sao + override_buffer_size +
		bse_reg_buffer_size + vpp_reg_buffer_size +
		sps_pps_slice_hdr + slice_cmd_buffer_size +
		bse_slice_cmd_buffer_size + ir_buffer_size + slice_info_bufsize +
		lambda_lut_size + se_stats_bufsize + temp_scratch_mv_bufsize +
		output_mv_bufsize + 1024;
	return size;
}

/* H264 encode scratch1: 16x16 LCU. */
static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 num_ref, bool ten_bit, u32 num_vpp_pipes)
{
	return calculate_enc_scratch1_size(inst, width, height, 16,
		num_ref, ten_bit, num_vpp_pipes, false);
}

/* H265 encode scratch1: 32x32 LCU. */
static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 num_ref, bool ten_bit, u32 num_vpp_pipes)
{
	return calculate_enc_scratch1_size(inst, width, height, 32,
		num_ref, ten_bit, num_vpp_pipes, true);
}

/* VP8 encode scratch1: forced single pipe, 16x16 LCU. */
static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst,
	u32 width, u32 height, u32 num_ref, bool ten_bit, u32 num_vpp_pipes)
{
(void)num_vpp_pipes; + return calculate_enc_scratch1_size(inst, width, height, 16, + num_ref, ten_bit, 1, false); +} + +static inline u32 hfi_ubwc_calc_metadata_plane_stride(u32 width, + u32 metadata_stride_multi, u32 tile_width_pels) +{ + return ALIGN(((width + (tile_width_pels - 1)) / tile_width_pels), + metadata_stride_multi); +} + +static inline u32 hfi_ubwc_metadata_plane_bufheight(u32 height, + u32 metadata_height_multi, u32 tile_height_pels) +{ + return ALIGN(((height + (tile_height_pels - 1)) / tile_height_pels), + metadata_height_multi); +} + +static inline u32 hfi_ubwc_metadata_plane_buffer_size(u32 metadata_stride, + u32 metadata_buf_height) +{ + return ALIGN(metadata_stride * metadata_buf_height, + BUFFER_ALIGNMENT_4096_BYTES); +} + +static inline u32 hfi_ubwc_uv_metadata_plane_stride(u32 width, + u32 metadata_stride_multi, u32 tile_width_pels) +{ + return ALIGN(((((width + 1) >> 1) + (tile_width_pels - 1)) / + tile_width_pels), metadata_stride_multi); +} + +static inline u32 hfi_ubwc_uv_metadata_plane_bufheight(u32 height, + u32 metadata_height_multi, u32 tile_height_pels) +{ + return ALIGN(((((height + 1) >> 1) + (tile_height_pels - 1)) / + tile_height_pels), metadata_height_multi); +} + +static inline u32 hfi_iris2_enc_dpb_buffer_size(u32 width, u32 height, + bool ten_bit) +{ + u32 aligned_width, aligned_height, chroma_height, ref_buf_height; + u32 luma_size, chroma_size; + u32 metadata_stride, meta_buf_height, meta_size_y, meta_size_c; + u32 ref_luma_stride_bytes, ref_chroma_height_bytes; + u32 ref_buf_size = 0, ref_stride; + u32 size; + + if (!ten_bit) { + aligned_height = ALIGN(height, HFI_VENUS_HEIGHT_ALIGNMENT); + chroma_height = height >> 1; + chroma_height = ALIGN(chroma_height, + HFI_VENUS_HEIGHT_ALIGNMENT); + aligned_width = ALIGN(width, HFI_VENUS_WIDTH_ALIGNMENT); + metadata_stride = hfi_ubwc_calc_metadata_plane_stride(width, + 64, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_WIDTH); + meta_buf_height = hfi_ubwc_metadata_plane_bufheight(height, 
+ 16, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT); + meta_size_y = hfi_ubwc_metadata_plane_buffer_size( + metadata_stride, meta_buf_height); + meta_size_c = hfi_ubwc_metadata_plane_buffer_size( + metadata_stride, meta_buf_height); + size = (aligned_height + chroma_height) * aligned_width + + meta_size_y + meta_size_c; + } else { + ref_buf_height = (height + (HFI_VENUS_HEIGHT_ALIGNMENT - 1)) + & (~(HFI_VENUS_HEIGHT_ALIGNMENT - 1)); + ref_luma_stride_bytes = ((width + SYSTEM_LAL_TILE10 - 1) / + SYSTEM_LAL_TILE10) * SYSTEM_LAL_TILE10; + ref_stride = 4 * (ref_luma_stride_bytes / 3); + ref_stride = (ref_stride + (BUFFER_ALIGNMENT_SIZE(128) - 1)) & + (~(BUFFER_ALIGNMENT_SIZE(128) - 1)); + luma_size = ref_buf_height * ref_stride; + ref_chroma_height_bytes = (((height + 1) >> 1) + + (BUFFER_ALIGNMENT_SIZE(32) - 1)) & + (~(BUFFER_ALIGNMENT_SIZE(32) - 1)); + chroma_size = ref_stride * ref_chroma_height_bytes; + luma_size = (luma_size + (BUFFER_ALIGNMENT_4096_BYTES - 1)) & + (~(BUFFER_ALIGNMENT_4096_BYTES - 1)); + chroma_size = (chroma_size + + (BUFFER_ALIGNMENT_4096_BYTES - 1)) & + (~(BUFFER_ALIGNMENT_4096_BYTES - 1)); + ref_buf_size = luma_size + chroma_size; + metadata_stride = hfi_ubwc_calc_metadata_plane_stride( + width, + VENUS_METADATA_STRIDE_MULTIPLE, + HFI_COLOR_FORMAT_YUV420_TP10_UBWC_Y_TILE_WIDTH); + meta_buf_height = hfi_ubwc_metadata_plane_bufheight( + height, + VENUS_METADATA_HEIGHT_MULTIPLE, + HFI_COLOR_FORMAT_YUV420_TP10_UBWC_Y_TILE_HEIGHT); + meta_size_y = hfi_ubwc_metadata_plane_buffer_size( + metadata_stride, meta_buf_height); + meta_size_c = hfi_ubwc_metadata_plane_buffer_size( + metadata_stride, meta_buf_height); + size = ref_buf_size + meta_size_y + meta_size_c; + } + return size; +} + +static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 num_ref, bool ten_bit, bool downscale, + u32 rotation_val, u32 flip) +{ + u32 size; + + size = hfi_iris2_enc_dpb_buffer_size(width, height, ten_bit); + size = size 
* (num_ref + 1) + 4096; + if (downscale && (rotation_val || flip)) { + /* VPSS output is always 128 x 32 (8-bit) or 192 x 16 (10-bit) aligned */ + if (rotation_val == 90 || rotation_val == 270) + size += hfi_iris2_enc_dpb_buffer_size(height, width, ten_bit); + else + size += hfi_iris2_enc_dpb_buffer_size(width, height, ten_bit); + size += 4096; + } + return size; +} + +static inline u32 calculate_enc_persist_size(void) +{ + return HFI_IRIS2_ENC_PERSIST_SIZE; +} + +static inline u32 calculate_h264d_persist1_size(void) +{ + u32 size = 0; + + size = ALIGN((SIZE_SLIST_BUF_H264 * NUM_SLIST_BUF_H264 + + NUM_HW_PIC_BUF * SIZE_SEI_USERDATA), + VENUS_DMA_ALIGNMENT); + return size; +} + +static inline u32 calculate_h265d_persist1_size(void) +{ + u32 size = 0; + + size = ALIGN((SIZE_SLIST_BUF_H265 * NUM_SLIST_BUF_H265 + H265_NUM_TILE + * sizeof(u32) + NUM_HW_PIC_BUF * SIZE_SEI_USERDATA), + VENUS_DMA_ALIGNMENT); + return size; +} + +static inline u32 calculate_vp8d_persist1_size(void) +{ + u32 size = 0; + + size = ALIGN(VP8_NUM_PROBABILITY_TABLE_BUF * VP8_PROB_TABLE_SIZE, + VENUS_DMA_ALIGNMENT); + return size; +} + +static inline u32 calculate_vp9d_persist1_size(void) +{ + u32 size = 0; + + size = ALIGN(VP9_NUM_PROBABILITY_TABLE_BUF * VP9_PROB_TABLE_SIZE, + VENUS_DMA_ALIGNMENT) + + ALIGN(HFI_IRIS2_VP9D_COMV_SIZE, VENUS_DMA_ALIGNMENT) + + ALIGN(MAX_SUPERFRAME_HEADER_LEN, VENUS_DMA_ALIGNMENT) + + ALIGN(VP9_UDC_HEADER_BUF_SIZE, VENUS_DMA_ALIGNMENT) + + ALIGN(VP9_NUM_FRAME_INFO_BUF * CCE_TILE_OFFSET_SIZE, + VENUS_DMA_ALIGNMENT); + return size; +} + +static inline u32 calculate_mpeg2d_persist1_size(void) +{ + return QMATRIX_SIZE + MP2D_QPDUMP_SIZE; +} diff --git a/techpack/video/msm/vidc/msm_vidc_buffer_calculations.h b/techpack/video/msm/vidc/msm_vidc_buffer_calculations.h new file mode 100644 index 000000000000..043199854830 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_buffer_calculations.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 
(c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __H_MSM_VIDC_BUFFER_MEM_DEFS_H__ +#define __H_MSM_VIDC_BUFFER_MEM_DEFS_H__ + +/* extra buffers in case of dcvs */ +#define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4 +#define DCVS_ENC_EXTRA_INPUT_BUFFERS 4 + +struct msm_vidc_dec_buff_size_calculators { + u32 (*calculate_scratch_size)(struct msm_vidc_inst *inst, u32 width, + u32 height, bool is_interlaced, u32 delay, u32 num_vpp_pipes); + u32 (*calculate_scratch1_size)(struct msm_vidc_inst *inst, u32 width, + u32 height, u32 min_buf_count, bool split_mode_enabled, + u32 num_vpp_pipes); + u32 (*calculate_persist1_size)(void); +}; + +struct msm_vidc_enc_buff_size_calculators { + u32 (*calculate_scratch_size)(struct msm_vidc_inst *inst, u32 width, + u32 height, u32 work_mode, u32 num_vpp_pipes); + u32 (*calculate_scratch1_size)(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 num_ref, bool ten_bit, + u32 num_vpp_pipes); + u32 (*calculate_scratch2_size)(struct msm_vidc_inst *inst, + u32 width, u32 height, u32 num_ref, bool ten_bit, bool downscale, + u32 rotation_val, u32 flip); + u32 (*calculate_persist_size)(void); +}; + +void msm_vidc_init_buffer_size_calculators(struct msm_vidc_inst *inst); +int msm_vidc_calculate_input_buffer_count(struct msm_vidc_inst *inst); +int msm_vidc_calculate_output_buffer_count(struct msm_vidc_inst *inst); +int msm_vidc_calculate_buffer_counts(struct msm_vidc_inst *inst); +int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type); +u32 msm_vidc_calculate_dec_input_frame_size(struct msm_vidc_inst *inst, + u32 buffer_size_limit); +u32 msm_vidc_calculate_dec_output_frame_size(struct msm_vidc_inst *inst); +u32 msm_vidc_calculate_dec_output_extra_size(struct msm_vidc_inst *inst); +u32 msm_vidc_calculate_enc_input_frame_size(struct msm_vidc_inst *inst); +u32 msm_vidc_calculate_enc_output_frame_size(struct msm_vidc_inst *inst); +u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst 
*inst); +u32 msm_vidc_calculate_enc_output_extra_size(struct msm_vidc_inst *inst); + +#endif // __H_MSM_VIDC_BUFFER_MEM_DEFS_H__ diff --git a/techpack/video/msm/vidc/msm_vidc_bus.h b/techpack/video/msm/vidc/msm_vidc_bus.h new file mode 100644 index 000000000000..43dc76ea9ed6 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_bus.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __H_MSM_VIDC_BUS_DEFS_H__ +#define __H_MSM_VIDC_BUS_DEFS_H__ + +#include "fixedpoint.h" +#include "msm_vidc_debug.h" +#include "vidc_hfi_api.h" + +#define COMPRESSION_RATIO_MAX 5 + +enum vidc_bus_type { + PERF, + DDR, + LLCC, +}; + +/* + * Minimum dimensions for which to calculate bandwidth. + * This means that anything bandwidth(0, 0) == + * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height) + */ +static const struct { + int height, width; +} BASELINE_DIMENSIONS = { + .width = 1280, + .height = 720, +}; + +/* converts Mbps to bps (the "b" part can be bits or bytes based on context) */ +#define kbps(__mbps) ((__mbps) * 1000) +#define bps(__mbps) (kbps(__mbps) * 1000) + +#define GENERATE_COMPRESSION_PROFILE(__bpp, __worst) { \ + .bpp = __bpp, \ + .ratio = __worst, \ +} + +/* + * The below table is a structural representation of the following table: + * Resolution | Bitrate | Compression Ratio | + * ............|............|.........................................| + * Width Height|Average High|Avg_8bpc Worst_8bpc Avg_10bpc Worst_10bpc| + * 1280 720| 7 14| 1.69 1.28 1.49 1.23| + * 1920 1080| 20 40| 1.69 1.28 1.49 1.23| + * 2560 1440| 32 64| 2.2 1.26 1.97 1.22| + * 3840 2160| 42 84| 2.2 1.26 1.97 1.22| + * 4096 2160| 44 88| 2.2 1.26 1.97 1.22| + * 4096 2304| 48 96| 2.2 1.26 1.97 1.22| + */ +static struct lut { + int frame_size; /* width x height */ + int frame_rate; + unsigned long bitrate; + struct { + int bpp; + fp_t ratio; + } compression_ratio[COMPRESSION_RATIO_MAX]; 
+} const LUT[] = { + { + .frame_size = 1280 * 720, + .frame_rate = 30, + .bitrate = 14, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 1280 * 720, + .frame_rate = 60, + .bitrate = 22, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 1920 * 1088, + .frame_rate = 30, + .bitrate = 40, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 1920 * 1088, + .frame_rate = 60, + .bitrate = 64, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 2560 * 1440, + .frame_rate = 30, + .bitrate = 64, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 2560 * 1440, + .frame_rate = 60, + .bitrate = 102, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 3840 * 2160, + .frame_rate = 30, + .bitrate = 84, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 3840 * 2160, + .frame_rate = 60, + .bitrate = 134, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2160, + .frame_rate = 30, + .bitrate = 88, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2160, + .frame_rate = 60, + .bitrate = 141, + 
.compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2304, + .frame_rate = 30, + .bitrate = 96, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2304, + .frame_rate = 60, + .bitrate = 154, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, +}; + +static inline u32 get_type_frm_name(const char *name) +{ + if (!strcmp(name, "venus-llcc")) + return LLCC; + else if (!strcmp(name, "venus-ddr")) + return DDR; + else + return PERF; +} + +#define DUMP_HEADER_MAGIC 0xdeadbeef +#define DUMP_FP_FMT "%FP" /* special format for fp_t */ + +struct dump { + char *key; + char *format; + size_t val; +}; + +struct msm_vidc_bus_data { + unsigned long total_bw_ddr; + unsigned long total_bw_llcc; +}; + +int calc_bw_ar50lt(struct vidc_bus_vote_data *vidc_data); + +int calc_bw_iris1(struct vidc_bus_vote_data *vidc_data); + +int calc_bw_iris2(struct vidc_bus_vote_data *vidc_data); + +struct lut const *__lut(int width, int height, int fps); +fp_t __compression_ratio(struct lut const *entry, int bpp); +void __dump(struct dump dump[], int len, u32 sid); + +static inline bool __ubwc(u32 format) +{ + switch (format) { + case HFI_COLOR_FORMAT_NV12_UBWC: + case HFI_COLOR_FORMAT_YUV420_TP10_UBWC: + return true; + default: + return false; + } +} + +static inline int __bpp(u32 format, u32 sid) +{ + switch (format) { + case HFI_COLOR_FORMAT_NV12: + case HFI_COLOR_FORMAT_NV21: + case HFI_COLOR_FORMAT_NV12_UBWC: + case HFI_COLOR_FORMAT_RGBA8888_UBWC: + return 8; + case HFI_COLOR_FORMAT_YUV420_TP10_UBWC: + case HFI_COLOR_FORMAT_P010: + return 10; + default: + s_vpr_e(sid, "Unsupported colorformat (%x)", format); + return INT_MAX; + } +} + +#endif // __H_MSM_VIDC_BUS_DEFS_H__ diff --git 
a/techpack/video/msm/vidc/msm_vidc_bus_ar50lite.c b/techpack/video/msm/vidc/msm_vidc_bus_ar50lite.c new file mode 100644 index 000000000000..496e379083fb --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_bus_ar50lite.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#include "msm_vidc_bus.h" +#include "msm_vidc_internal.h" + +static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d) +{ + /* Encoder Parameters */ + int width, height, fps, bitrate, lcu_size, lcu_per_frame, + collocated_bytes_per_lcu, search_range_v, search_range_h, + vertical_tile_size, num_tiles; + + unsigned int bins_to_bit_factor; + fp_t y_bw; + bool is_h264_category = true; + fp_t orig_read_factor, recon_write_factor, + ref_y_read_factor, ref_c_read_factor, overhead_factor; + + /* Output parameters */ + fp_t orig_read, recon_write, + ref_y_read, ref_c_read, + bse_lb_read, bse_lb_write, + collocated_read, collocated_write, + bitstream_read, bitstream_write, + total_read, total_write, + total; + + unsigned long ret = 0; + + /* Encoder Fixed Parameters setup */ + search_range_h = 96; + search_range_v = 48; + bins_to_bit_factor = 4; + overhead_factor = FP(1, 3, 100); + orig_read_factor = FP(1, 50, 100); /* L + C */ + recon_write_factor = FP(1, 50, 100); /* L + C */ + ref_c_read_factor = FP(0, 75, 100); /* 1.5/2 ( 1.5 Cache efficiency )*/ + + fps = d->fps; + width = max(d->output_width, BASELINE_DIMENSIONS.width); + height = max(d->output_height, BASELINE_DIMENSIONS.height); + bitrate = d->bitrate > 0 ? 
(d->bitrate + 1000000 - 1) / 1000000 : + __lut(width, height, fps)->bitrate; + lcu_size = d->lcu_size; + + /* Derived Parameters Setup*/ + lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * + DIV_ROUND_UP(height, lcu_size); + + if (d->codec == HAL_VIDEO_CODEC_HEVC || + d->codec == HAL_VIDEO_CODEC_VP9) { + /* H264, VP8, MPEG2 use the same settings */ + /* HEVC, VP9 use the same setting */ + is_h264_category = false; + } + + collocated_bytes_per_lcu = lcu_size == 16 ? 16 : + lcu_size == 32 ? 64 : 256; + + if (width >= 1296 && width <= 1536) + vertical_tile_size = 768; + else + vertical_tile_size = 640; + + num_tiles = DIV_ROUND_UP(width, vertical_tile_size); + y_bw = fp_mult(fp_mult(FP_INT(width), FP_INT(height)), FP_INT(fps)); + y_bw = fp_div(y_bw, FP_INT(bps(1))); + + /* -1 for 1 less tile boundary penalty */ + ref_y_read_factor = (num_tiles - 1) * 2; + ref_y_read_factor = fp_div(fp_mult(FP_INT(ref_y_read_factor), + FP_INT(search_range_h)), FP_INT(width)); + ref_y_read_factor = ref_y_read_factor + FP_INT(1); + + orig_read = fp_mult(y_bw, orig_read_factor); + recon_write = fp_mult(y_bw, recon_write_factor); + ref_y_read = fp_mult(y_bw, ref_y_read_factor); + ref_c_read = fp_mult(y_bw, ref_c_read_factor); + + bse_lb_read = fp_div(FP_INT(16 * fps * lcu_per_frame), + FP_INT(bps(1))); + bse_lb_write = bse_lb_read; + + collocated_read = fp_div(FP_INT(lcu_per_frame * + collocated_bytes_per_lcu * fps), FP_INT(bps(1))); + collocated_write = collocated_read; + + bitstream_read = fp_mult(fp_div(FP_INT(bitrate), FP_INT(8)), + FP_INT(bins_to_bit_factor)); + bitstream_write = fp_div(FP_INT(bitrate), FP_INT(8)); + bitstream_write = bitstream_write + bitstream_read; + + total_read = orig_read + ref_y_read + ref_c_read + + bse_lb_read + collocated_read + bitstream_read; + total_write = recon_write + bse_lb_write + + collocated_write + bitstream_write; + + total = total_read + total_write; + total = fp_mult(total, overhead_factor); + + if (msm_vidc_debug & VIDC_BUS) { + struct dump 
dump[] = { + {"ENCODER PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"width", "%d", width}, + {"height", "%d", height}, + {"fps", "%d", fps}, + {"bitrate (Mbit/sec)", "%lu", bitrate}, + {"lcu size", "%d", lcu_size}, + {"collocated byter per lcu", "%d", collocated_bytes_per_lcu}, + {"horizontal search range", "%d", search_range_h}, + {"vertical search range", "%d", search_range_v}, + {"bins to bit factor", "%d", bins_to_bit_factor}, + + {"DERIVED PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"lcu/frame", "%d", lcu_per_frame}, + {"vertical tile size", "%d", vertical_tile_size}, + {"number of tiles", "%d", num_tiles}, + {"Y BW", DUMP_FP_FMT, y_bw}, + + {"original read factor", DUMP_FP_FMT, orig_read_factor}, + {"recon write factor", DUMP_FP_FMT, recon_write_factor}, + {"ref read Y factor", DUMP_FP_FMT, ref_y_read_factor}, + {"ref read C factor", DUMP_FP_FMT, ref_c_read_factor}, + {"overhead_factor", DUMP_FP_FMT, overhead_factor}, + + {"INTERMEDIATE B/W DDR", "", DUMP_HEADER_MAGIC}, + {"orig read", DUMP_FP_FMT, orig_read}, + {"recon write", DUMP_FP_FMT, recon_write}, + {"ref read Y", DUMP_FP_FMT, ref_y_read}, + {"ref read C", DUMP_FP_FMT, ref_c_read}, + {"BSE lb read", DUMP_FP_FMT, bse_lb_read}, + {"BSE lb write", DUMP_FP_FMT, bse_lb_write}, + {"collocated read", DUMP_FP_FMT, collocated_read}, + {"collocated write", DUMP_FP_FMT, collocated_write}, + {"bitstream read", DUMP_FP_FMT, bitstream_read}, + {"bitstream write", DUMP_FP_FMT, bitstream_write}, + {"total read", DUMP_FP_FMT, total_read}, + {"total write", DUMP_FP_FMT, total_write}, + {"total", DUMP_FP_FMT, total}, + }; + __dump(dump, ARRAY_SIZE(dump), d->sid); + } + + + d->calc_bw_ddr = kbps(fp_round(total)); + + return ret; +} + +static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d) +{ + /* Decoder parameters */ + int width, height, fps, bitrate, lcu_size, + lcu_per_frame, collocated_bytes_per_lcu, + motion_complexity; + + unsigned int bins_to_bits_factor, vsp_read_factor; + fp_t y_bw; + bool is_h264_category 
= true; + fp_t recon_write_factor, ref_read_factor, + opb_factor, overhead_factor; + + /* Output parameters */ + fp_t opb_write, recon_write, + ref_read, + bse_lb_read, bse_lb_write, + collocated_read, collocated_write, + bitstream_read, bitstream_write, + total_read, total_write, + total; + + unsigned long ret = 0; + + /* Decoder Fixed Parameters */ + overhead_factor = FP(1, 3, 100); + recon_write_factor = FP(1, 50, 100); /* L + C */ + opb_factor = FP(1, 50, 100); /* L + C */ + motion_complexity = 5; /* worst case complexity */ + bins_to_bits_factor = 4; + vsp_read_factor = 6; + + fps = d->fps; + width = max(d->output_width, BASELINE_DIMENSIONS.width); + height = max(d->output_height, BASELINE_DIMENSIONS.height); + bitrate = d->bitrate > 0 ? (d->bitrate + 1000000 - 1) / 1000000 : + __lut(width, height, fps)->bitrate; + lcu_size = d->lcu_size; + + /* Derived Parameters Setup*/ + lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * + DIV_ROUND_UP(height, lcu_size); + + if (d->codec == HAL_VIDEO_CODEC_HEVC || + d->codec == HAL_VIDEO_CODEC_VP9) { + /* H264, VP8, MPEG2 use the same settings */ + /* HEVC, VP9 use the same setting */ + is_h264_category = false; + } + + collocated_bytes_per_lcu = lcu_size == 16 ? 16 : + lcu_size == 32 ? 64 : 256; + + y_bw = fp_mult(fp_mult(FP_INT(width), FP_INT(height)), FP_INT(fps)); + y_bw = fp_div(y_bw, FP_INT(bps(1))); + + ref_read_factor = FP(1, 50, 100); /* L + C */ + ref_read_factor = fp_mult(ref_read_factor, FP_INT(motion_complexity)); + + recon_write = fp_mult(y_bw, recon_write_factor); + ref_read = fp_mult(y_bw, ref_read_factor); + + if (d->codec == HAL_VIDEO_CODEC_HEVC) + bse_lb_read = FP_INT(lcu_size == 32 ? 64 : + lcu_size == 16 ? 
32 : 128); + else + bse_lb_read = FP_INT(128); + bse_lb_read = fp_div(fp_mult(FP_INT(lcu_per_frame * fps), bse_lb_read), + FP_INT(bps(1))); + bse_lb_write = bse_lb_read; + + collocated_read = fp_div(FP_INT(lcu_per_frame * + collocated_bytes_per_lcu * fps), FP_INT(bps(1))); + collocated_write = collocated_read; + + bitstream_read = fp_mult(fp_div(FP_INT(bitrate), FP_INT(8)), + FP_INT(vsp_read_factor)); + bitstream_write = fp_mult(fp_div(FP_INT(bitrate), FP_INT(8)), + FP_INT(bins_to_bits_factor)); + + opb_write = fp_mult(y_bw, opb_factor); + + total_read = ref_read + bse_lb_read + collocated_read + + bitstream_read; + total_write = recon_write + bse_lb_write + bitstream_write + + opb_write; + + total = total_read + total_write; + total = fp_mult(total, overhead_factor); + + if (msm_vidc_debug & VIDC_BUS) { + struct dump dump[] = { + {"DECODER PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"width", "%d", width}, + {"height", "%d", height}, + {"fps", "%d", fps}, + {"bitrate (Mbit/sec)", "%lu", bitrate}, + {"lcu size", "%d", lcu_size}, + {"collocated byter per lcu", "%d", collocated_bytes_per_lcu}, + {"vsp read factor", "%d", vsp_read_factor}, + {"bins to bits factor", "%d", bins_to_bits_factor}, + {"motion complexity", "%d", motion_complexity}, + + {"DERIVED PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"lcu/frame", "%d", lcu_per_frame}, + {"Y BW", DUMP_FP_FMT, y_bw}, + {"recon write factor", DUMP_FP_FMT, recon_write_factor}, + {"ref_read_factor", DUMP_FP_FMT, ref_read_factor}, + {"opb factor", DUMP_FP_FMT, opb_factor}, + {"overhead_factor", DUMP_FP_FMT, overhead_factor}, + + {"INTERMEDIATE B/W DDR", "", DUMP_HEADER_MAGIC}, + {"recon write", DUMP_FP_FMT, recon_write}, + {"ref read", DUMP_FP_FMT, ref_read}, + {"BSE lb read", DUMP_FP_FMT, bse_lb_read}, + {"BSE lb write", DUMP_FP_FMT, bse_lb_write}, + {"collocated read", DUMP_FP_FMT, collocated_read}, + {"collocated write", DUMP_FP_FMT, collocated_write}, + {"bitstream read", DUMP_FP_FMT, bitstream_read}, + {"bitstream write", 
DUMP_FP_FMT, bitstream_write}, + {"opb write", DUMP_FP_FMT, opb_write}, + {"total read", DUMP_FP_FMT, total_read}, + {"total write", DUMP_FP_FMT, total_write}, + {"total", DUMP_FP_FMT, total}, + }; + __dump(dump, ARRAY_SIZE(dump), d->sid); + } + + d->calc_bw_ddr = kbps(fp_round(total)); + + return ret; +} + +static unsigned long __calculate(struct vidc_bus_vote_data *d) +{ + unsigned long value = 0; + + switch (d->domain) { + case HAL_VIDEO_DOMAIN_ENCODER: + value = __calculate_encoder(d); + break; + case HAL_VIDEO_DOMAIN_DECODER: + value = __calculate_decoder(d); + break; + default: + s_vpr_e(d->sid, "Unknown Domain %#x", d->domain); + } + + return value; +} + +int calc_bw_ar50lt(struct vidc_bus_vote_data *vidc_data) +{ + int ret = 0; + + if (!vidc_data) + return ret; + + ret = __calculate(vidc_data); + + return ret; +} diff --git a/techpack/video/msm/vidc/msm_vidc_bus_iris2.c b/techpack/video/msm/vidc/msm_vidc_bus_iris2.c new file mode 100644 index 000000000000..187820fc41a5 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_bus_iris2.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + */ + +#include "msm_vidc_bus.h" +#include "msm_vidc_internal.h" + +static unsigned long __calculate_vpe(struct vidc_bus_vote_data *d) +{ + return 0; +} + +static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d) +{ + /* + * XXX: Don't fool around with any of the hardcoded numbers unless you + * know /exactly/ what you're doing. Many of these numbers are + * measured heuristics and hardcoded numbers taken from the firmware. 
+ */ + /* Decoder parameters */ + int width, height, lcu_size, fps, dpb_bpp; + bool unified_dpb_opb, dpb_compression_enabled = true, + opb_compression_enabled = false, + llc_ref_read_l2_cache_enabled = false, + llc_top_line_buf_enabled = false; + fp_t dpb_read_compression_factor, dpb_opb_scaling_ratio, + dpb_write_compression_factor, opb_write_compression_factor, + qsmmu_bw_overhead_factor; + bool is_h264_category = true; + + /* Derived parameters */ + int lcu_per_frame, collocated_bytes_per_lcu, tnbr_per_lcu; + unsigned long bitrate; + unsigned int num_vpp_pipes; + + fp_t bins_to_bit_factor, vsp_read_factor, vsp_write_factor, + dpb_factor, dpb_write_factor, y_bw_no_ubwc_8bpp; + fp_t y_bw_no_ubwc_10bpp = 0, y_bw_10bpp_p010 = 0, + motion_vector_complexity = 0; + fp_t dpb_total = 0; + + /* Output parameters */ + struct { + fp_t vsp_read, vsp_write, collocated_read, collocated_write, + dpb_read, dpb_write, opb_read, opb_write, + line_buffer_read, line_buffer_write, + total; + } ddr = {0}; + + struct { + fp_t dpb_read, line_buffer_read, line_buffer_write, total; + } llc = {0}; + + unsigned long ret = 0; + unsigned int integer_part, frac_part; + + width = max(d->input_width, BASELINE_DIMENSIONS.width); + height = max(d->input_height, BASELINE_DIMENSIONS.height); + + fps = d->fps; + + lcu_size = d->lcu_size; + + dpb_bpp = __bpp(d->color_formats[0], d->sid); + + unified_dpb_opb = d->num_formats == 1; + + dpb_opb_scaling_ratio = fp_div(FP_INT(d->input_width * d->input_height), + FP_INT(d->output_width * d->output_height)); + + opb_compression_enabled = d->num_formats >= 2 && + __ubwc(d->color_formats[1]); + + integer_part = Q16_INT(d->compression_ratio); + frac_part = Q16_FRAC(d->compression_ratio); + dpb_read_compression_factor = FP(integer_part, frac_part, 100); + + integer_part = Q16_INT(d->complexity_factor); + frac_part = Q16_FRAC(d->complexity_factor); + motion_vector_complexity = FP(integer_part, frac_part, 100); + + dpb_write_compression_factor = 
dpb_read_compression_factor; + opb_write_compression_factor = opb_compression_enabled ? + dpb_write_compression_factor : FP_ONE; + + num_vpp_pipes = d->num_vpp_pipes; + + if (d->codec == HAL_VIDEO_CODEC_HEVC || + d->codec == HAL_VIDEO_CODEC_VP9) { + /* H264, VP8, MPEG2 use the same settings */ + /* HEVC, VP9 use the same setting */ + is_h264_category = false; + } + if (d->use_sys_cache) { + llc_ref_read_l2_cache_enabled = true; + if (is_h264_category) + llc_top_line_buf_enabled = true; + } + + /* Derived parameters setup */ + lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * + DIV_ROUND_UP(height, lcu_size); + + bitrate = DIV_ROUND_UP(d->bitrate, 1000000); + + bins_to_bit_factor = FP_INT(4); + + vsp_write_factor = bins_to_bit_factor; + vsp_read_factor = bins_to_bit_factor + FP_INT(2); + + collocated_bytes_per_lcu = lcu_size == 16 ? 16 : + lcu_size == 32 ? 64 : 256; + + dpb_factor = FP(1, 50, 100); + dpb_write_factor = FP(1, 5, 100); + + /* Ref A: This change is applicable for all + * IRIS2 targets, but currently being done for + * 1 pipe only due to timeline constraints. + */ + if (num_vpp_pipes == 1) + tnbr_per_lcu = lcu_size == 16 ? 64 : + lcu_size == 32 ? 64 : 128; + else + tnbr_per_lcu = lcu_size == 16 ? 128 : + lcu_size == 32 ? 64 : 128; + + /* .... For DDR & LLC ...... */ + ddr.vsp_read = fp_div(fp_mult(FP_INT(bitrate), + vsp_read_factor), FP_INT(8)); + ddr.vsp_write = fp_div(fp_mult(FP_INT(bitrate), + vsp_write_factor), FP_INT(8)); + + ddr.collocated_read = fp_div(FP_INT(lcu_per_frame * + collocated_bytes_per_lcu * fps), FP_INT(bps(1))); + ddr.collocated_write = ddr.collocated_read; + + y_bw_no_ubwc_8bpp = fp_div(FP_INT(width * height * fps), + FP_INT(1000 * 1000)); + + if (dpb_bpp != 8) { + y_bw_no_ubwc_10bpp = + fp_div(fp_mult(y_bw_no_ubwc_8bpp, FP_INT(256)), + FP_INT(192)); + y_bw_10bpp_p010 = y_bw_no_ubwc_8bpp * 2; + } + + ddr.dpb_read = dpb_bpp == 8 ? 
y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; + ddr.dpb_read = fp_div(fp_mult(ddr.dpb_read, + fp_mult(dpb_factor, motion_vector_complexity)), + dpb_read_compression_factor); + + ddr.dpb_write = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; + ddr.dpb_write = fp_div(fp_mult(ddr.dpb_write, + fp_mult(dpb_factor, dpb_write_factor)), + dpb_write_compression_factor); + + dpb_total = ddr.dpb_read + ddr.dpb_write; + + if (llc_ref_read_l2_cache_enabled) { + ddr.dpb_read = fp_div(ddr.dpb_read, is_h264_category ? + FP(1, 30, 100) : FP(1, 14, 100)); + llc.dpb_read = dpb_total - ddr.dpb_write - ddr.dpb_read; + } + + ddr.opb_read = FP_ZERO; + ddr.opb_write = unified_dpb_opb ? FP_ZERO : (dpb_bpp == 8 ? + y_bw_no_ubwc_8bpp : (opb_compression_enabled ? + y_bw_no_ubwc_10bpp : y_bw_10bpp_p010)); + ddr.opb_write = fp_div(fp_mult(dpb_factor, ddr.opb_write), + fp_mult(dpb_opb_scaling_ratio, opb_write_compression_factor)); + + ddr.line_buffer_read = + fp_div(FP_INT(tnbr_per_lcu * lcu_per_frame * fps), + FP_INT(bps(1))); + /* This change is applicable when 'Ref A' code change + * is missing. But currently being done for IRIS2 + * with 2 pipes only due to timeline constraints. 
+ */ + if ((num_vpp_pipes == 2) && (is_h264_category)) + ddr.line_buffer_write = fp_div(ddr.line_buffer_read,FP_INT(2)); + else + ddr.line_buffer_write = ddr.line_buffer_read; + if (llc_top_line_buf_enabled) { + llc.line_buffer_read = ddr.line_buffer_read; + llc.line_buffer_write = ddr.line_buffer_write; + ddr.line_buffer_write = ddr.line_buffer_read = FP_ZERO; + } + + ddr.total = ddr.vsp_read + ddr.vsp_write + + ddr.collocated_read + ddr.collocated_write + + ddr.dpb_read + ddr.dpb_write + + ddr.opb_read + ddr.opb_write + + ddr.line_buffer_read + ddr.line_buffer_write; + + qsmmu_bw_overhead_factor = FP(1, 3, 100); + + ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor); + llc.total = llc.dpb_read + llc.line_buffer_read + + llc.line_buffer_write + ddr.total; + + /* Dump all the variables for easier debugging */ + if (msm_vidc_debug & VIDC_BUS) { + struct dump dump[] = { + {"DECODER PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"lcu size", "%d", lcu_size}, + {"dpb bitdepth", "%d", dpb_bpp}, + {"frame rate", "%d", fps}, + {"dpb/opb unified", "%d", unified_dpb_opb}, + {"dpb/opb downscaling ratio", DUMP_FP_FMT, + dpb_opb_scaling_ratio}, + {"dpb compression", "%d", dpb_compression_enabled}, + {"opb compression", "%d", opb_compression_enabled}, + {"dpb read compression factor", DUMP_FP_FMT, + dpb_read_compression_factor}, + {"dpb write compression factor", DUMP_FP_FMT, + dpb_write_compression_factor}, + {"frame width", "%d", width}, + {"frame height", "%d", height}, + {"llc ref read l2 cache enabled", "%d", + llc_ref_read_l2_cache_enabled}, + {"llc top line buf enabled", "%d", + llc_top_line_buf_enabled}, + + {"DERIVED PARAMETERS (1)", "", DUMP_HEADER_MAGIC}, + {"lcus/frame", "%d", lcu_per_frame}, + {"bitrate (Mbit/sec)", "%d", bitrate}, + {"bins to bit factor", DUMP_FP_FMT, bins_to_bit_factor}, + {"dpb write factor", DUMP_FP_FMT, dpb_write_factor}, + {"vsp read factor", DUMP_FP_FMT, vsp_read_factor}, + {"vsp write factor", DUMP_FP_FMT, vsp_write_factor}, + {"tnbr/lcu", 
"%d", tnbr_per_lcu}, + {"collocated bytes/LCU", "%d", collocated_bytes_per_lcu}, + {"bw for NV12 8bpc)", DUMP_FP_FMT, y_bw_no_ubwc_8bpp}, + {"bw for NV12 10bpc)", DUMP_FP_FMT, y_bw_no_ubwc_10bpp}, + + {"DERIVED PARAMETERS (2)", "", DUMP_HEADER_MAGIC}, + {"mv complexity", DUMP_FP_FMT, motion_vector_complexity}, + {"qsmmu_bw_overhead_factor", DUMP_FP_FMT, + qsmmu_bw_overhead_factor}, + + {"INTERMEDIATE DDR B/W", "", DUMP_HEADER_MAGIC}, + {"vsp read", DUMP_FP_FMT, ddr.vsp_read}, + {"vsp write", DUMP_FP_FMT, ddr.vsp_write}, + {"collocated read", DUMP_FP_FMT, ddr.collocated_read}, + {"collocated write", DUMP_FP_FMT, ddr.collocated_write}, + {"line buffer read", DUMP_FP_FMT, ddr.line_buffer_read}, + {"line buffer write", DUMP_FP_FMT, ddr.line_buffer_write}, + {"opb read", DUMP_FP_FMT, ddr.opb_read}, + {"opb write", DUMP_FP_FMT, ddr.opb_write}, + {"dpb read", DUMP_FP_FMT, ddr.dpb_read}, + {"dpb write", DUMP_FP_FMT, ddr.dpb_write}, + {"dpb total", DUMP_FP_FMT, dpb_total}, + {"INTERMEDIATE LLC B/W", "", DUMP_HEADER_MAGIC}, + {"llc dpb read", DUMP_FP_FMT, llc.dpb_read}, + {"llc line buffer read", DUMP_FP_FMT, llc.line_buffer_read}, + {"llc line buffer write", DUMP_FP_FMT, llc.line_buffer_write}, + + }; + __dump(dump, ARRAY_SIZE(dump), d->sid); + } + + d->calc_bw_ddr = kbps(fp_round(ddr.total)); + d->calc_bw_llcc = kbps(fp_round(llc.total)); + + return ret; +} + +static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d) +{ + /* + * XXX: Don't fool around with any of the hardcoded numbers unless you + * know /exactly/ what you're doing. Many of these numbers are + * measured heuristics and hardcoded numbers taken from the firmware. 
+ */ + /* Encoder Parameters */ + int width, height, fps, lcu_size, bitrate, lcu_per_frame, + collocated_bytes_per_lcu, tnbr_per_lcu, dpb_bpp, + original_color_format, vertical_tile_width, rotation; + bool work_mode_1, original_compression_enabled, + low_power, cropping_or_scaling, + b_frames_enabled = false, + llc_ref_chroma_cache_enabled = false, + llc_top_line_buf_enabled = false, + llc_vpss_rot_line_buf_enabled = false; + + unsigned int bins_to_bit_factor; + fp_t dpb_compression_factor, + original_compression_factor, + original_compression_factor_y, + y_bw_no_ubwc_8bpp, y_bw_no_ubwc_10bpp = 0, y_bw_10bpp_p010 = 0, + input_compression_factor, + downscaling_ratio, + ref_y_read_bw_factor, ref_cbcr_read_bw_factor, + recon_write_bw_factor, + total_ref_read_crcb, + qsmmu_bw_overhead_factor; + fp_t integer_part, frac_part; + unsigned int num_vpp_pipes; + unsigned long ret = 0; + + /* Output parameters */ + struct { + fp_t vsp_read, vsp_write, collocated_read, collocated_write, + ref_read_y, ref_read_crcb, ref_write, + ref_write_overlap, orig_read, + line_buffer_read, line_buffer_write, + total; + } ddr = {0}; + + struct { + fp_t ref_read_crcb, line_buffer, total; + } llc = {0}; + + /* Encoder Parameters setup */ + rotation = d->rotation; + cropping_or_scaling = false; + /* + * recon_write_bw_factor varies according to resolution and bit-depth, + * here use 1.08(1.075) for worst case. + * Similar for ref_y_read_bw_factor, it can reach 1.375 for worst case, + * here use 1.3 for average case, and can somewhat balance the + * worst case assumption for UBWC CR factors. 
+ */ + recon_write_bw_factor = FP(1, 8, 100); + ref_y_read_bw_factor = FP(1, 30, 100); + ref_cbcr_read_bw_factor = FP(1, 50, 100); + + + /* Derived Parameters */ + num_vpp_pipes = d->num_vpp_pipes; + fps = d->fps; + width = max(d->output_width, BASELINE_DIMENSIONS.width); + height = max(d->output_height, BASELINE_DIMENSIONS.height); + downscaling_ratio = fp_div(FP_INT(d->input_width * d->input_height), + FP_INT(d->output_width * d->output_height)); + downscaling_ratio = max(downscaling_ratio, FP_ONE); + bitrate = d->bitrate > 0 ? DIV_ROUND_UP(d->bitrate, 1000000) : + __lut(width, height, fps)->bitrate; + lcu_size = d->lcu_size; + lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * + DIV_ROUND_UP(height, lcu_size); + tnbr_per_lcu = 16; + + dpb_bpp = __bpp(d->color_formats[0], d->sid); + + y_bw_no_ubwc_8bpp = fp_div(FP_INT(width * height * fps), + FP_INT(1000 * 1000)); + + if (dpb_bpp != 8) { + y_bw_no_ubwc_10bpp = fp_div(fp_mult(y_bw_no_ubwc_8bpp, + FP_INT(256)), FP_INT(192)); + y_bw_10bpp_p010 = y_bw_no_ubwc_8bpp * 2; + } + + b_frames_enabled = d->b_frames_enabled; + if (num_vpp_pipes == 1 && b_frames_enabled) + vertical_tile_width = 480; + else if (num_vpp_pipes == 1 && !b_frames_enabled) + vertical_tile_width = 672; + else + vertical_tile_width = 960; + + original_color_format = d->num_formats >= 1 ? 
+ d->color_formats[0] : HFI_COLOR_FORMAT_NV12_UBWC; + original_compression_enabled = __ubwc(original_color_format); + + work_mode_1 = d->work_mode == HFI_WORKMODE_1; + low_power = d->power_mode == VIDC_POWER_LOW; + bins_to_bit_factor = 4; + + if (d->use_sys_cache) { + llc_ref_chroma_cache_enabled = true; + llc_top_line_buf_enabled = true, + llc_vpss_rot_line_buf_enabled = true; + } + + integer_part = Q16_INT(d->compression_ratio); + frac_part = Q16_FRAC(d->compression_ratio); + dpb_compression_factor = FP(integer_part, frac_part, 100); + + integer_part = Q16_INT(d->input_cr); + frac_part = Q16_FRAC(d->input_cr); + input_compression_factor = FP(integer_part, frac_part, 100); + + original_compression_factor = original_compression_factor_y = + !original_compression_enabled ? FP_ONE : + __compression_ratio(__lut(width, height, fps), dpb_bpp); + /* use input cr if it is valid (not 1), otherwise use lut */ + if (original_compression_enabled && + input_compression_factor != FP_ONE) { + original_compression_factor = input_compression_factor; + /* Luma usually has lower compression factor than Chroma, + * input cf is overall cf, add 1.08 factor for Luma cf + */ + original_compression_factor_y = + input_compression_factor > FP(1, 8, 100) ? + fp_div(input_compression_factor, FP(1, 8, 100)) : + input_compression_factor; + } + + ddr.vsp_read = fp_div(FP_INT(bitrate * bins_to_bit_factor), FP_INT(8)); + ddr.vsp_write = ddr.vsp_read + fp_div(FP_INT(bitrate), FP_INT(8)); + + collocated_bytes_per_lcu = lcu_size == 16 ? 16 : + lcu_size == 32 ? 64 : 256; + + ddr.collocated_read = fp_div(FP_INT(lcu_per_frame * + collocated_bytes_per_lcu * fps), FP_INT(bps(1))); + + ddr.collocated_write = ddr.collocated_read; + + ddr.ref_read_y = dpb_bpp == 8 ? 
+ y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; + if (b_frames_enabled) + ddr.ref_read_y = ddr.ref_read_y * 2; + ddr.ref_read_y = fp_div(ddr.ref_read_y, dpb_compression_factor); + + ddr.ref_read_crcb = fp_mult((ddr.ref_read_y / 2), + ref_cbcr_read_bw_factor); + + if (width > vertical_tile_width) { + ddr.ref_read_y = fp_mult(ddr.ref_read_y, + ref_y_read_bw_factor); + } + + if (llc_ref_chroma_cache_enabled) { + total_ref_read_crcb = ddr.ref_read_crcb; + ddr.ref_read_crcb = fp_div(ddr.ref_read_crcb, + ref_cbcr_read_bw_factor); + llc.ref_read_crcb = total_ref_read_crcb - ddr.ref_read_crcb; + } + + ddr.ref_write = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; + ddr.ref_write = fp_div(fp_mult(ddr.ref_write, FP(1, 50, 100)), + dpb_compression_factor); + + if (width > vertical_tile_width) { + ddr.ref_write_overlap = fp_mult(ddr.ref_write, + (recon_write_bw_factor - FP_ONE)); + ddr.ref_write = fp_mult(ddr.ref_write, recon_write_bw_factor); + } + + ddr.orig_read = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : + (original_compression_enabled ? y_bw_no_ubwc_10bpp : + y_bw_10bpp_p010); + ddr.orig_read = fp_div(fp_mult(fp_mult(ddr.orig_read, FP(1, 50, 100)), + downscaling_ratio), original_compression_factor); + if (rotation == 90 || rotation == 270) + ddr.orig_read *= lcu_size == 32 ? (dpb_bpp == 8 ? 
1 : 3) : 2; + + ddr.line_buffer_read = + fp_div(FP_INT(tnbr_per_lcu * lcu_per_frame * fps), + FP_INT(bps(1))); + + ddr.line_buffer_write = ddr.line_buffer_read; + if (llc_top_line_buf_enabled) { + llc.line_buffer = ddr.line_buffer_read + ddr.line_buffer_write; + ddr.line_buffer_read = ddr.line_buffer_write = FP_ZERO; + } + + ddr.total = ddr.vsp_read + ddr.vsp_write + + ddr.collocated_read + ddr.collocated_write + + ddr.ref_read_y + ddr.ref_read_crcb + + ddr.ref_write + ddr.ref_write_overlap + + ddr.orig_read + + ddr.line_buffer_read + ddr.line_buffer_write; + + qsmmu_bw_overhead_factor = FP(1, 3, 100); + ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor); + llc.total = llc.ref_read_crcb + llc.line_buffer + ddr.total; + + if (msm_vidc_debug & VIDC_BUS) { + struct dump dump[] = { + {"ENCODER PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"width", "%d", width}, + {"height", "%d", height}, + {"fps", "%d", fps}, + {"dpb bitdepth", "%d", dpb_bpp}, + {"input downscaling ratio", DUMP_FP_FMT, downscaling_ratio}, + {"rotation", "%d", rotation}, + {"cropping or scaling", "%d", cropping_or_scaling}, + {"low power mode", "%d", low_power}, + {"work Mode", "%d", work_mode_1}, + {"B frame enabled", "%d", b_frames_enabled}, + {"original frame format", "%#x", original_color_format}, + {"original compression enabled", "%d", + original_compression_enabled}, + {"dpb compression factor", DUMP_FP_FMT, + dpb_compression_factor}, + {"input compression factor", DUMP_FP_FMT, + input_compression_factor}, + {"llc ref chroma cache enabled", DUMP_FP_FMT, + llc_ref_chroma_cache_enabled}, + {"llc top line buf enabled", DUMP_FP_FMT, + llc_top_line_buf_enabled}, + {"llc vpss rot line buf enabled ", DUMP_FP_FMT, + llc_vpss_rot_line_buf_enabled}, + + {"DERIVED PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"lcu size", "%d", lcu_size}, + {"bitrate (Mbit/sec)", "%lu", bitrate}, + {"bins to bit factor", "%u", bins_to_bit_factor}, + {"original compression factor", DUMP_FP_FMT, + original_compression_factor}, + 
{"original compression factor y", DUMP_FP_FMT, + original_compression_factor_y}, + {"qsmmu_bw_overhead_factor", + DUMP_FP_FMT, qsmmu_bw_overhead_factor}, + {"bw for NV12 8bpc)", DUMP_FP_FMT, y_bw_no_ubwc_8bpp}, + {"bw for NV12 10bpc)", DUMP_FP_FMT, y_bw_no_ubwc_10bpp}, + + {"INTERMEDIATE B/W DDR", "", DUMP_HEADER_MAGIC}, + {"vsp read", DUMP_FP_FMT, ddr.vsp_read}, + {"vsp write", DUMP_FP_FMT, ddr.vsp_write}, + {"collocated read", DUMP_FP_FMT, ddr.collocated_read}, + {"collocated write", DUMP_FP_FMT, ddr.collocated_write}, + {"ref read y", DUMP_FP_FMT, ddr.ref_read_y}, + {"ref read crcb", DUMP_FP_FMT, ddr.ref_read_crcb}, + {"ref write", DUMP_FP_FMT, ddr.ref_write}, + {"ref write overlap", DUMP_FP_FMT, ddr.ref_write_overlap}, + {"original read", DUMP_FP_FMT, ddr.orig_read}, + {"line buffer read", DUMP_FP_FMT, ddr.line_buffer_read}, + {"line buffer write", DUMP_FP_FMT, ddr.line_buffer_write}, + {"INTERMEDIATE LLC B/W", "", DUMP_HEADER_MAGIC}, + {"llc ref read crcb", DUMP_FP_FMT, llc.ref_read_crcb}, + {"llc line buffer", DUMP_FP_FMT, llc.line_buffer}, + }; + __dump(dump, ARRAY_SIZE(dump), d->sid); + } + + d->calc_bw_ddr = kbps(fp_round(ddr.total)); + d->calc_bw_llcc = kbps(fp_round(llc.total)); + + return ret; +} + +static unsigned long __calculate(struct vidc_bus_vote_data *d) +{ + unsigned long value = 0; + + switch (d->domain) { + case HAL_VIDEO_DOMAIN_VPE: + value = __calculate_vpe(d); + break; + case HAL_VIDEO_DOMAIN_ENCODER: + value = __calculate_encoder(d); + break; + case HAL_VIDEO_DOMAIN_DECODER: + value = __calculate_decoder(d); + break; + default: + s_vpr_e(d->sid, "Unknown Domain %#x", d->domain); + } + + return value; +} + +int calc_bw_iris2(struct vidc_bus_vote_data *vidc_data) +{ + int ret = 0; + + if (!vidc_data) + return ret; + + ret = __calculate(vidc_data); + + return ret; +} diff --git a/techpack/video/msm/vidc/msm_vidc_clocks.c b/techpack/video/msm/vidc/msm_vidc_clocks.c new file mode 100644 index 000000000000..168abc2f8080 --- /dev/null +++ 
b/techpack/video/msm/vidc/msm_vidc_clocks.c @@ -0,0 +1,1545 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#include "msm_vidc_common.h" +#include "vidc_hfi_api.h" +#include "msm_vidc_debug.h" +#include "msm_vidc_clocks.h" +#include "msm_vidc_buffer_calculations.h" +#include "msm_vidc_bus.h" +#include "vidc_hfi.h" + +#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR (1 << 16) +#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR (4 << 16) + +#define MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO (1 << 16) +#define MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO (5 << 16) + +#define MSM_VIDC_SESSION_INACTIVE_THRESHOLD_MS 1000 + +static int msm_vidc_decide_work_mode_ar50_lt(struct msm_vidc_inst *inst); +static unsigned long msm_vidc_calc_freq_ar50_lt(struct msm_vidc_inst *inst, + u32 filled_len); +static unsigned long msm_vidc_calc_freq_iris2(struct msm_vidc_inst *inst, + u32 filled_len); + +struct msm_vidc_core_ops core_ops_ar50_lt = { + .calc_freq = msm_vidc_calc_freq_ar50_lt, + .decide_work_route = NULL, + .decide_work_mode = msm_vidc_decide_work_mode_ar50_lt, + .decide_core_and_power_mode = + msm_vidc_decide_core_and_power_mode_ar50lt, + .calc_bw = calc_bw_ar50lt, +}; + +struct msm_vidc_core_ops core_ops_iris2 = { + .calc_freq = msm_vidc_calc_freq_iris2, + .decide_work_route = msm_vidc_decide_work_route_iris2, + .decide_work_mode = msm_vidc_decide_work_mode_iris2, + .decide_core_and_power_mode = msm_vidc_decide_core_and_power_mode_iris2, + .calc_bw = calc_bw_iris2, +}; + +static inline unsigned long get_ubwc_compression_ratio( + struct ubwc_cr_stats_info_type ubwc_stats_info) +{ + unsigned long sum = 0, weighted_sum = 0; + unsigned long compression_ratio = 0; + + weighted_sum = + 32 * ubwc_stats_info.cr_stats_info0 + + 64 * ubwc_stats_info.cr_stats_info1 + + 96 * ubwc_stats_info.cr_stats_info2 + + 128 * ubwc_stats_info.cr_stats_info3 + + 160 * ubwc_stats_info.cr_stats_info4 + + 192 * ubwc_stats_info.cr_stats_info5 + + 
256 * ubwc_stats_info.cr_stats_info6; + + sum = + ubwc_stats_info.cr_stats_info0 + + ubwc_stats_info.cr_stats_info1 + + ubwc_stats_info.cr_stats_info2 + + ubwc_stats_info.cr_stats_info3 + + ubwc_stats_info.cr_stats_info4 + + ubwc_stats_info.cr_stats_info5 + + ubwc_stats_info.cr_stats_info6; + + compression_ratio = (weighted_sum && sum) ? + ((256 * sum) << 16) / weighted_sum : compression_ratio; + + return compression_ratio; +} + +bool res_is_less_than(u32 width, u32 height, + u32 ref_width, u32 ref_height) +{ + u32 num_mbs = NUM_MBS_PER_FRAME(height, width); + u32 max_side = max(ref_width, ref_height); + + if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) && + width < max_side && + height < max_side) + return true; + else + return false; +} + +bool res_is_greater_than(u32 width, u32 height, + u32 ref_width, u32 ref_height) +{ + u32 num_mbs = NUM_MBS_PER_FRAME(height, width); + u32 max_side = max(ref_width, ref_height); + + if (num_mbs > NUM_MBS_PER_FRAME(ref_height, ref_width) || + width > max_side || + height > max_side) + return true; + else + return false; +} + +bool res_is_greater_than_or_equal_to(u32 width, u32 height, + u32 ref_width, u32 ref_height) +{ + u32 num_mbs = NUM_MBS_PER_FRAME(height, width); + u32 max_side = max(ref_width, ref_height); + + if (num_mbs >= NUM_MBS_PER_FRAME(ref_height, ref_width) || + width >= max_side || + height >= max_side) + return true; + else + return false; +} + +bool res_is_less_than_or_equal_to(u32 width, u32 height, + u32 ref_width, u32 ref_height) +{ + u32 num_mbs = NUM_MBS_PER_FRAME(height, width); + u32 max_side = max(ref_width, ref_height); + + if (num_mbs <= NUM_MBS_PER_FRAME(ref_height, ref_width) && + width <= max_side && + height <= max_side) + return true; + else + return false; +} + +bool is_vpp_delay_allowed(struct msm_vidc_inst *inst) +{ + u32 codec = get_v4l2_codec(inst); + u32 mbpf = msm_vidc_get_mbs_per_frame(inst); + + return (inst->core->resources.has_vpp_delay && + is_decode_session(inst) && + 
!is_thumbnail_session(inst) && + (mbpf >= NUM_MBS_PER_FRAME(7680, 3840)) && + (codec == V4L2_PIX_FMT_H264 + || codec == V4L2_PIX_FMT_HEVC)); +} + +int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst) +{ + int height, width; + struct v4l2_format *out_f; + struct v4l2_format *inp_f; + + out_f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + inp_f = &inst->fmts[INPUT_PORT].v4l2_fmt; + height = max(out_f->fmt.pix_mp.height, + inp_f->fmt.pix_mp.height); + width = max(out_f->fmt.pix_mp.width, + inp_f->fmt.pix_mp.width); + + return NUM_MBS_PER_FRAME(height, width); +} + +int msm_vidc_get_fps(struct msm_vidc_inst *inst) +{ + int fps; + + if (inst->clk_data.operating_rate > inst->clk_data.frame_rate) + fps = (inst->clk_data.operating_rate >> 16) ? + (inst->clk_data.operating_rate >> 16) : 1; + else + fps = inst->clk_data.frame_rate >> 16; + + return fps; +} + +static inline bool is_active_session(u64 prev, u64 curr) +{ + u64 ts_delta; + + if (!prev || !curr) + return true; + + ts_delta = (prev < curr) ? 
curr - prev : prev - curr; + + return ((ts_delta / NSEC_PER_MSEC) <= + MSM_VIDC_SESSION_INACTIVE_THRESHOLD_MS); +} + +void update_recon_stats(struct msm_vidc_inst *inst, + struct recon_stats_type *recon_stats) +{ + struct v4l2_ctrl *ctrl; + struct recon_buf *binfo; + u32 CR = 0, CF = 0; + u32 frame_size; + + if (inst->core->resources.ubwc_stats_in_fbd == 1) + return; + + /* do not consider recon stats in case of superframe */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_SUPERFRAME); + if (ctrl->val) + return; + + CR = get_ubwc_compression_ratio(recon_stats->ubwc_stats_info); + + frame_size = (msm_vidc_get_mbs_per_frame(inst) / (32 * 8) * 3) / 2; + + if (frame_size) + CF = recon_stats->complexity_number / frame_size; + else + CF = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR; + mutex_lock(&inst->refbufs.lock); + list_for_each_entry(binfo, &inst->refbufs.list, list) { + if (binfo->buffer_index == + recon_stats->buffer_index) { + binfo->CR = CR; + binfo->CF = CF; + break; + } + } + mutex_unlock(&inst->refbufs.lock); +} + +static int fill_dynamic_stats(struct msm_vidc_inst *inst, + struct vidc_bus_vote_data *vote_data) +{ + struct recon_buf *binfo, *nextb; + struct vidc_input_cr_data *temp, *next; + u32 max_cr = MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO; + u32 max_cf = MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR; + u32 max_input_cr = MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO; + u32 min_cf = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR; + u32 min_input_cr = MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO; + u32 min_cr = MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO; + + if (inst->core->resources.ubwc_stats_in_fbd == 1) { + mutex_lock(&inst->ubwc_stats_lock); + if (inst->ubwc_stats.is_valid == 1) { + min_cr = inst->ubwc_stats.worst_cr; + max_cf = inst->ubwc_stats.worst_cf; + min_input_cr = inst->ubwc_stats.worst_cr; + } + mutex_unlock(&inst->ubwc_stats_lock); + } else { + mutex_lock(&inst->refbufs.lock); + list_for_each_entry_safe(binfo, nextb, + &inst->refbufs.list, list) { + if (binfo->CR) { + min_cr = min(min_cr, binfo->CR); + 
max_cr = max(max_cr, binfo->CR); + } + if (binfo->CF) { + min_cf = min(min_cf, binfo->CF); + max_cf = max(max_cf, binfo->CF); + } + } + mutex_unlock(&inst->refbufs.lock); + + mutex_lock(&inst->input_crs.lock); + list_for_each_entry_safe(temp, next, + &inst->input_crs.list, list) { + min_input_cr = min(min_input_cr, temp->input_cr); + max_input_cr = max(max_input_cr, temp->input_cr); + } + mutex_unlock(&inst->input_crs.lock); + } + + /* Sanitize CF values from HW . */ + max_cf = min_t(u32, max_cf, MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR); + min_cf = max_t(u32, min_cf, MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR); + max_cr = min_t(u32, max_cr, MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO); + min_cr = max_t(u32, min_cr, MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO); + max_input_cr = min_t(u32, + max_input_cr, MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO); + min_input_cr = max_t(u32, + min_input_cr, MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO); + + vote_data->compression_ratio = min_cr; + vote_data->complexity_factor = max_cf; + vote_data->input_cr = min_input_cr; + + s_vpr_p(inst->sid, + "Input CR = %d Recon CR = %d Complexity Factor = %d\n", + vote_data->input_cr, vote_data->compression_ratio, + vote_data->complexity_factor); + + return 0; +} + +int msm_comm_set_buses(struct msm_vidc_core *core, u32 sid) +{ + int rc = 0; + struct msm_vidc_inst *inst = NULL; + struct hfi_device *hdev; + unsigned long total_bw_ddr = 0, total_bw_llcc = 0; + u64 curr_time_ns; + + if (!core || !core->device) { + s_vpr_e(sid, "%s: Invalid args: %pK\n", __func__, core); + return -EINVAL; + } + hdev = core->device; + curr_time_ns = ktime_get_ns(); + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + struct msm_vidc_buffer *temp, *next; + u32 filled_len = 0; + u32 device_addr = 0; + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, + &inst->registeredbufs.list, list) { + if (temp->vvb.vb2_buf.type == INPUT_MPLANE) { + filled_len = max(filled_len, + 
temp->vvb.vb2_buf.planes[0].bytesused); + device_addr = temp->smem[0].device_addr; + } + } + mutex_unlock(&inst->registeredbufs.lock); + + if (!filled_len || !device_addr) { + s_vpr_l(sid, "%s: no input\n", __func__); + continue; + } + + /* skip inactive session bus bandwidth */ + if (!is_active_session(inst->last_qbuf_time_ns, curr_time_ns)) { + inst->active = false; + continue; + } + + if (inst->bus_data.power_mode == VIDC_POWER_TURBO) { + total_bw_ddr = total_bw_llcc = INT_MAX; + break; + } + total_bw_ddr += inst->bus_data.calc_bw_ddr; + total_bw_llcc += inst->bus_data.calc_bw_llcc; + } + mutex_unlock(&core->lock); + + rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, + total_bw_ddr, total_bw_llcc, sid); + + return rc; +} + +int msm_comm_vote_bus(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct msm_vidc_core *core; + struct vidc_bus_vote_data *vote_data = NULL; + bool is_turbo = false; + struct v4l2_format *out_f; + struct v4l2_format *inp_f; + struct msm_vidc_buffer *temp, *next; + u32 filled_len = 0; + u32 device_addr = 0; + int codec = 0; + + if (!inst || !inst->core) { + d_vpr_e("%s: Invalid args: %pK\n", __func__, inst); + return -EINVAL; + } + core = inst->core; + vote_data = &inst->bus_data; + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, + &inst->registeredbufs.list, list) { + if (temp->vvb.vb2_buf.type == INPUT_MPLANE) { + filled_len = max(filled_len, + temp->vvb.vb2_buf.planes[0].bytesused); + device_addr = temp->smem[0].device_addr; + } + if (inst->session_type == MSM_VIDC_ENCODER && + (temp->vvb.flags & V4L2_BUF_FLAG_PERF_MODE)) { + is_turbo = true; + } + } + mutex_unlock(&inst->registeredbufs.lock); + + if (!filled_len || !device_addr) { + s_vpr_l(inst->sid, "%s: no input\n", __func__); + return 0; + } + + vote_data->sid = inst->sid; + vote_data->domain = get_hal_domain(inst->session_type, inst->sid); + vote_data->power_mode = 0; + if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW) + 
vote_data->power_mode = VIDC_POWER_TURBO; + if (msm_vidc_clock_voting || is_turbo || is_turbo_session(inst)) + vote_data->power_mode = VIDC_POWER_TURBO; + + if (vote_data->power_mode != VIDC_POWER_TURBO) { + out_f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + inp_f = &inst->fmts[INPUT_PORT].v4l2_fmt; + switch (inst->session_type) { + case MSM_VIDC_DECODER: + codec = inp_f->fmt.pix_mp.pixelformat; + break; + case MSM_VIDC_ENCODER: + codec = out_f->fmt.pix_mp.pixelformat; + break; + default: + s_vpr_e(inst->sid, "%s: invalid session_type %#x\n", + __func__, inst->session_type); + break; + } + + vote_data->codec = get_hal_codec(codec, inst->sid); + vote_data->input_width = inp_f->fmt.pix_mp.width; + vote_data->input_height = inp_f->fmt.pix_mp.height; + vote_data->output_width = out_f->fmt.pix_mp.width; + vote_data->output_height = out_f->fmt.pix_mp.height; + vote_data->lcu_size = (codec == V4L2_PIX_FMT_HEVC || + codec == V4L2_PIX_FMT_VP9) ? 32 : 16; + + vote_data->fps = msm_vidc_get_fps(inst); + if (inst->session_type == MSM_VIDC_ENCODER) { + vote_data->bitrate = inst->clk_data.bitrate; + vote_data->rotation = + msm_comm_g_ctrl_for_id(inst, V4L2_CID_ROTATE); + vote_data->b_frames_enabled = + msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDEO_B_FRAMES) != 0; + /* scale bitrate if operating rate is larger than fps */ + if (vote_data->fps > (inst->clk_data.frame_rate >> 16) + && (inst->clk_data.frame_rate >> 16)) { + vote_data->bitrate = vote_data->bitrate / + (inst->clk_data.frame_rate >> 16) * + vote_data->fps; + } + } else if (inst->session_type == MSM_VIDC_DECODER) { + vote_data->bitrate = + filled_len * vote_data->fps * 8; + } + + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_PRIMARY) { + vote_data->color_formats[0] = + msm_comm_get_hfi_uncompressed( + inst->clk_data.opb_fourcc, inst->sid); + vote_data->num_formats = 1; + } else { + vote_data->color_formats[0] = + msm_comm_get_hfi_uncompressed( + inst->clk_data.dpb_fourcc, inst->sid); + 
vote_data->color_formats[1] = + msm_comm_get_hfi_uncompressed( + inst->clk_data.opb_fourcc, inst->sid); + vote_data->num_formats = 2; + } + vote_data->work_mode = inst->clk_data.work_mode; + fill_dynamic_stats(inst, vote_data); + + if (core->resources.sys_cache_res_set) + vote_data->use_sys_cache = true; + + vote_data->num_vpp_pipes = + inst->core->platform_data->num_vpp_pipes; + + call_core_op(core, calc_bw, vote_data); + } + + rc = msm_comm_set_buses(core, inst->sid); + + return rc; +} + +static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst, + unsigned long freq) +{ + int rc = 0; + int bufs_with_fw = 0; + struct msm_vidc_format *fmt; + struct clock_data *dcvs; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + if (!inst->clk_data.dcvs_mode || inst->batch.enable) { + s_vpr_l(inst->sid, "Skip DCVS (dcvs %d, batching %d)\n", + inst->clk_data.dcvs_mode, inst->batch.enable); + inst->clk_data.dcvs_flags = 0; + return 0; + } + + dcvs = &inst->clk_data; + + if (is_decode_session(inst)) { + bufs_with_fw = msm_comm_num_queued_bufs(inst, OUTPUT_MPLANE); + fmt = &inst->fmts[OUTPUT_PORT]; + } else { + bufs_with_fw = msm_comm_num_queued_bufs(inst, INPUT_MPLANE); + fmt = &inst->fmts[INPUT_PORT]; + } + + /* +1 as one buffer is going to be queued after the function */ + bufs_with_fw += 1; + + /* + * DCVS decides clock level based on below algo + + * Limits : + * min_threshold : Buffers required for reference by FW. + * nom_threshold : Midpoint of Min and Max thresholds + * max_threshold : Min Threshold + DCVS extra buffers, allocated + * for smooth flow. + * 1) When buffers outside FW are reaching client's extra buffers, + * FW is slow and will impact pipeline, Increase clock. + * 2) When pending buffers with FW are less than FW requested, + * pipeline has cushion to absorb FW slowness, Decrease clocks. 
+ * 3) When DCVS has engaged(Inc or Dec) and pending buffers with FW + * transitions past the nom_threshold, switch to calculated load. + * This smoothens the clock transitions. + * 4) Otherwise maintain previous Load config. + */ + + if (bufs_with_fw >= dcvs->max_threshold) { + dcvs->dcvs_flags = MSM_VIDC_DCVS_INCR; + } else if (bufs_with_fw < dcvs->min_threshold) { + dcvs->dcvs_flags = MSM_VIDC_DCVS_DECR; + } else if ((dcvs->dcvs_flags & MSM_VIDC_DCVS_DECR && + bufs_with_fw >= dcvs->nom_threshold) || + (dcvs->dcvs_flags & MSM_VIDC_DCVS_INCR && + bufs_with_fw <= dcvs->nom_threshold) || + (inst->session_type == MSM_VIDC_ENCODER && + dcvs->dcvs_flags & MSM_VIDC_DCVS_DECR && + bufs_with_fw >= dcvs->min_threshold)) + dcvs->dcvs_flags = 0; + + s_vpr_p(inst->sid, "DCVS: bufs_with_fw %d Th[%d %d %d] Flag %#x\n", + bufs_with_fw, dcvs->min_threshold, + dcvs->nom_threshold, dcvs->max_threshold, + dcvs->dcvs_flags); + + return rc; +} + +static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core, u32 sid) +{ + struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + unsigned long freq = 0; + + allowed_clks_tbl = core->resources.allowed_clks_tbl; + freq = allowed_clks_tbl[0].clock_rate; + s_vpr_l(sid, "Max rate = %lu\n", freq); + return freq; +} + +void msm_comm_free_input_cr_table(struct msm_vidc_inst *inst) +{ + struct vidc_input_cr_data *temp, *next; + + mutex_lock(&inst->input_crs.lock); + list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) { + list_del(&temp->list); + kfree(temp); + } + INIT_LIST_HEAD(&inst->input_crs.list); + mutex_unlock(&inst->input_crs.lock); +} + +void msm_comm_update_input_cr(struct msm_vidc_inst *inst, + u32 index, u32 cr) +{ + struct vidc_input_cr_data *temp, *next; + bool found = false; + + mutex_lock(&inst->input_crs.lock); + list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) { + if (temp->index == index) { + temp->input_cr = cr; + found = true; + break; + } + } + + if (!found) { + temp = 
kzalloc(sizeof(*temp), GFP_KERNEL); + if (!temp) { + s_vpr_e(inst->sid, "%s: malloc failure.\n", __func__); + goto exit; + } + temp->index = index; + temp->input_cr = cr; + list_add_tail(&temp->list, &inst->input_crs.list); + } +exit: + mutex_unlock(&inst->input_crs.lock); +} + +static unsigned long msm_vidc_calc_freq_ar50_lt(struct msm_vidc_inst *inst, + u32 filled_len) +{ + u64 freq = 0, vpp_cycles = 0, vsp_cycles = 0; + u64 fw_cycles = 0, fw_vpp_cycles = 0; + u32 vpp_cycles_per_mb; + u32 mbs_per_second; + struct msm_vidc_core *core = NULL; + int i = 0; + struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + u64 rate = 0, fps; + struct clock_data *dcvs = NULL; + + core = inst->core; + dcvs = &inst->clk_data; + + mbs_per_second = msm_comm_get_inst_load_per_core(inst, + LOAD_POWER); + + fps = msm_vidc_get_fps(inst); + + /* + * Calculate vpp, vsp cycles separately for encoder and decoder. + * Even though, most part is common now, in future it may change + * between them. + */ + + fw_cycles = fps * inst->core->resources.fw_cycles; + fw_vpp_cycles = fps * inst->core->resources.fw_vpp_cycles; + + if (inst->session_type == MSM_VIDC_ENCODER) { + vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ? 
+ inst->clk_data.entry->low_power_cycles : + inst->clk_data.entry->vpp_cycles; + + vpp_cycles = mbs_per_second * vpp_cycles_per_mb; + /* 21 / 20 is minimum overhead factor */ + vpp_cycles += max(vpp_cycles / 20, fw_vpp_cycles); + + vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; + + /* 10 / 7 is overhead factor */ + vsp_cycles += (inst->clk_data.bitrate * 10) / 7; + } else if (inst->session_type == MSM_VIDC_DECODER) { + vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles; + /* 21 / 20 is minimum overhead factor */ + vpp_cycles += max(vpp_cycles / 20, fw_vpp_cycles); + + vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; + /* 10 / 7 is overhead factor */ + vsp_cycles += div_u64((fps * filled_len * 8 * 10), 7); + + } else { + s_vpr_e(inst->sid, "%s: Unknown session type\n", __func__); + return msm_vidc_max_freq(inst->core, inst->sid); + } + + freq = max(vpp_cycles, vsp_cycles); + freq = max(freq, fw_cycles); + + s_vpr_l(inst->sid, "Update DCVS Load\n"); + allowed_clks_tbl = core->resources.allowed_clks_tbl; + for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) { + rate = allowed_clks_tbl[i].clock_rate; + if (rate >= freq) + break; + } + + if (i < 0) + i = 0; + + s_vpr_p(inst->sid, "%s: Inst %pK : Filled Len = %d Freq = %llu\n", + __func__, inst, filled_len, freq); + + return (unsigned long) freq; +} + +static unsigned long msm_vidc_calc_freq_iris2(struct msm_vidc_inst *inst, + u32 filled_len) +{ + u64 vsp_cycles = 0, vpp_cycles = 0, fw_cycles = 0, freq = 0; + u64 fw_vpp_cycles = 0; + u32 vpp_cycles_per_mb; + u32 mbs_per_second; + struct msm_vidc_core *core = NULL; + u32 fps; + struct clock_data *dcvs = NULL; + u32 operating_rate, vsp_factor_num = 1, vsp_factor_den = 1; + u32 base_cycles = 0; + u32 codec = 0; + u64 bitrate = 0; + + core = inst->core; + dcvs = &inst->clk_data; + + mbs_per_second = msm_comm_get_inst_load_per_core(inst, + LOAD_POWER); + + fps = msm_vidc_get_fps(inst); + + /* + * Calculate vpp, vsp, fw 
cycles separately for encoder and decoder. + * Even though, most part is common now, in future it may change + * between them. + */ + + fw_cycles = fps * inst->core->resources.fw_cycles; + fw_vpp_cycles = fps * inst->core->resources.fw_vpp_cycles; + + if (inst->session_type == MSM_VIDC_ENCODER) { + vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ? + inst->clk_data.entry->low_power_cycles : + inst->clk_data.entry->vpp_cycles; + + vpp_cycles = mbs_per_second * vpp_cycles_per_mb / + inst->clk_data.work_route; + /* Factor 1.25 for IbP and 1.375 for I1B2b1P GOP structure */ + if (is_hier_b_session(inst)) { + vpp_cycles += (vpp_cycles / 4) + (vpp_cycles / 8); + } else if (msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDEO_B_FRAMES)) { + vpp_cycles += vpp_cycles / 4; + } + + /* 21 / 20 is minimum overhead factor */ + vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles); + /* 1.01 is multi-pipe overhead */ + if (inst->clk_data.work_route > 1) + vpp_cycles += div_u64(vpp_cycles, 100); + /* + * 1080p@480fps usecase needs exactly 338MHz + * without any margin left. Hence, adding 2 percent + * extra to bump it to next level (366MHz). + */ + if (fps == 480) + vpp_cycles += div_u64(vpp_cycles * 2, 100); + + /* + * Add 5 percent extra for 720p@960fps use case + * to bump it to next level (366MHz). 
+ */ + if (fps == 960) + vpp_cycles += div_u64(vpp_cycles * 5, 100); + + /* VSP */ + /* bitrate is based on fps, scale it using operating rate */ + operating_rate = inst->clk_data.operating_rate >> 16; + if (operating_rate > (inst->clk_data.frame_rate >> 16) && + (inst->clk_data.frame_rate >> 16)) { + vsp_factor_num = operating_rate; + vsp_factor_den = inst->clk_data.frame_rate >> 16; + } + vsp_cycles = div_u64(((u64)inst->clk_data.bitrate * + vsp_factor_num), vsp_factor_den); + + codec = get_v4l2_codec(inst); + base_cycles = inst->clk_data.entry->vsp_cycles; + if (codec == V4L2_PIX_FMT_VP9) { + vsp_cycles = div_u64(vsp_cycles * 170, 100); + } else if (inst->entropy_mode == HFI_H264_ENTROPY_CABAC) { + vsp_cycles = div_u64(vsp_cycles * 135, 100); + } else { + base_cycles = 0; + vsp_cycles = div_u64(vsp_cycles, 2); + } + /* VSP FW Overhead 1.05 */ + vsp_cycles = div_u64(vsp_cycles * 21, 20); + + if (inst->clk_data.work_mode == HFI_WORKMODE_1) + vsp_cycles = vsp_cycles * 3; + + vsp_cycles += mbs_per_second * base_cycles; + + } else if (inst->session_type == MSM_VIDC_DECODER) { + /* VPP */ + vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles / + inst->clk_data.work_route; + /* 21 / 20 is minimum overhead factor */ + vpp_cycles += max(vpp_cycles / 20, fw_vpp_cycles); + /* 1.059 is multi-pipe overhead */ + if (inst->clk_data.work_route > 1) + vpp_cycles += div_u64(vpp_cycles * 59, 1000); + + /* VSP */ + codec = get_v4l2_codec(inst); + base_cycles = inst->has_bframe ? 
+ 80 : inst->clk_data.entry->vsp_cycles; + bitrate = fps * filled_len * 8; + vsp_cycles = bitrate; + + if (codec == V4L2_PIX_FMT_VP9) { + vsp_cycles = div_u64(vsp_cycles * 170, 100); + } else if (inst->entropy_mode == HFI_H264_ENTROPY_CABAC) { + vsp_cycles = div_u64(vsp_cycles * 135, 100); + } else { + base_cycles = 0; + vsp_cycles = div_u64(vsp_cycles, 2); + } + /* VSP FW Overhead 1.05 */ + vsp_cycles = div_u64(vsp_cycles * 21, 20); + + if (inst->clk_data.work_mode == HFI_WORKMODE_1) + vsp_cycles = vsp_cycles * 3; + + vsp_cycles += mbs_per_second * base_cycles; + + if (codec == V4L2_PIX_FMT_VP9 && + inst->clk_data.work_mode == HFI_WORKMODE_2 && + inst->clk_data.work_route == 4 && + bitrate > 90000000) + vsp_cycles = msm_vidc_max_freq(inst->core, inst->sid); + } else { + s_vpr_e(inst->sid, "%s: Unknown session type\n", __func__); + return msm_vidc_max_freq(inst->core, inst->sid); + } + + freq = max(vpp_cycles, vsp_cycles); + freq = max(freq, fw_cycles); + + s_vpr_p(inst->sid, "%s: inst %pK: filled len %d required freq %llu\n", + __func__, inst, filled_len, freq); + + return (unsigned long) freq; +} + +int msm_vidc_set_clocks(struct msm_vidc_core *core, u32 sid) +{ + struct hfi_device *hdev; + unsigned long freq_core_1 = 0, freq_core_2 = 0, rate = 0; + unsigned long freq_core_max = 0; + struct msm_vidc_inst *inst = NULL; + struct msm_vidc_buffer *temp, *next; + u32 device_addr, filled_len; + int rc = 0, i = 0; + struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + bool increment, decrement; + u64 curr_time_ns; + + hdev = core->device; + curr_time_ns = ktime_get_ns(); + allowed_clks_tbl = core->resources.allowed_clks_tbl; + if (!allowed_clks_tbl) { + s_vpr_e(sid, "%s: Invalid parameters\n", __func__); + return -EINVAL; + } + + mutex_lock(&core->lock); + increment = false; + decrement = true; + list_for_each_entry(inst, &core->instances, list) { + device_addr = 0; + filled_len = 0; + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, 
next, + &inst->registeredbufs.list, list) { + if (temp->vvb.vb2_buf.type == INPUT_MPLANE) { + filled_len = max(filled_len, + temp->vvb.vb2_buf.planes[0].bytesused); + device_addr = temp->smem[0].device_addr; + } + } + mutex_unlock(&inst->registeredbufs.lock); + + if (!filled_len || !device_addr) { + s_vpr_l(sid, "%s: no input\n", __func__); + continue; + } + + /* skip inactive session clock rate */ + if (!is_active_session(inst->last_qbuf_time_ns, curr_time_ns)) { + inst->active = false; + continue; + } + + if (inst->clk_data.core_id == VIDC_CORE_ID_1) + freq_core_1 += inst->clk_data.min_freq; + else if (inst->clk_data.core_id == VIDC_CORE_ID_2) + freq_core_2 += inst->clk_data.min_freq; + else if (inst->clk_data.core_id == VIDC_CORE_ID_3) { + freq_core_1 += inst->clk_data.min_freq; + freq_core_2 += inst->clk_data.min_freq; + } + + freq_core_max = max_t(unsigned long, freq_core_1, freq_core_2); + + if (msm_vidc_clock_voting) { + s_vpr_l(sid, "msm_vidc_clock_voting %d\n", + msm_vidc_clock_voting); + freq_core_max = msm_vidc_clock_voting; + decrement = false; + break; + } + + /* increment even if one session requested for it */ + if (inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR) + increment = true; + /* decrement only if all sessions requested for it */ + if (!(inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR)) + decrement = false; + } + + /* + * keep checking from lowest to highest rate until + * table rate >= requested rate + */ + for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) { + rate = allowed_clks_tbl[i].clock_rate; + if (rate >= freq_core_max) + break; + } + + if (i < 0) + i = 0; + + if (increment) { + if (i > 0) + rate = allowed_clks_tbl[i-1].clock_rate; + } else if (decrement) { + if (i < (int) (core->resources.allowed_clks_tbl_size - 1)) + rate = allowed_clks_tbl[i+1].clock_rate; + } + + core->min_freq = freq_core_max; + core->curr_freq = rate; + mutex_unlock(&core->lock); + + s_vpr_p(sid, + "%s: clock rate %lu requested %lu increment %d 
decrement %d\n", + __func__, core->curr_freq, core->min_freq, + increment, decrement); + rc = call_hfi_op(hdev, scale_clocks, + hdev->hfi_device_data, core->curr_freq, sid); + + return rc; +} + +int msm_comm_scale_clocks(struct msm_vidc_inst *inst) +{ + struct msm_vidc_buffer *temp, *next; + unsigned long freq = 0; + u32 filled_len = 0; + u32 device_addr = 0; + bool is_turbo = false; + + if (!inst || !inst->core) { + d_vpr_e("%s: Invalid args: Inst = %pK\n", __func__, inst); + return -EINVAL; + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) { + if (temp->vvb.vb2_buf.type == INPUT_MPLANE) { + filled_len = max(filled_len, + temp->vvb.vb2_buf.planes[0].bytesused); + if (temp->vvb.flags & V4L2_BUF_FLAG_PERF_MODE) + is_turbo = true; + device_addr = temp->smem[0].device_addr; + } + } + mutex_unlock(&inst->registeredbufs.lock); + + if (!filled_len || !device_addr) { + s_vpr_l(inst->sid, "%s: no input\n", __func__); + return 0; + } + + if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW || is_turbo || + is_turbo_session(inst)) { + inst->clk_data.min_freq = + msm_vidc_max_freq(inst->core, inst->sid); + inst->clk_data.dcvs_flags = 0; + } else if (msm_vidc_clock_voting) { + inst->clk_data.min_freq = msm_vidc_clock_voting; + inst->clk_data.dcvs_flags = 0; + } else { + freq = call_core_op(inst->core, calc_freq, inst, filled_len); + inst->clk_data.min_freq = freq; + msm_dcvs_scale_clocks(inst, freq); + } + + msm_vidc_set_clocks(inst->core, inst->sid); + + return 0; +} + +int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst, bool do_bw_calc) +{ + struct msm_vidc_core *core; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + core = inst->core; + hdev = core->device; + + if (!inst->active) { + /* do not skip bw voting for inactive -> active session */ + do_bw_calc = true; + inst->active = true; 
+ } + + if (msm_comm_scale_clocks(inst)) { + s_vpr_e(inst->sid, + "Failed to scale clocks. May impact performance\n"); + } + + if (do_bw_calc) { + if (msm_comm_vote_bus(inst)) { + s_vpr_e(inst->sid, + "Failed to scale DDR bus. May impact perf\n"); + } + } + + return 0; +} + +int msm_dcvs_try_enable(struct msm_vidc_inst *inst) +{ + if (!inst || !inst->core) { + d_vpr_e("%s: Invalid args: %pK\n", __func__, inst); + return -EINVAL; + } + + inst->clk_data.dcvs_mode = + !(msm_vidc_clock_voting || + !inst->core->resources.dcvs || + inst->flags & VIDC_THUMBNAIL || + is_low_latency_hint(inst) || + inst->clk_data.low_latency_mode || + inst->batch.enable || + is_turbo_session(inst) || + inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ || + is_encode_batching(inst)); + + s_vpr_hp(inst->sid, "DCVS %s: %pK\n", + inst->clk_data.dcvs_mode ? "enabled" : "disabled", inst); + + return 0; +} + +void msm_dcvs_reset(struct msm_vidc_inst *inst) +{ + struct msm_vidc_format *fmt; + struct clock_data *dcvs; + + if (!inst) { + d_vpr_e("%s: Invalid params\n", __func__); + return; + } + + dcvs = &inst->clk_data; + if (inst->session_type == MSM_VIDC_ENCODER) { + fmt = &inst->fmts[INPUT_PORT]; + } else if (inst->session_type == MSM_VIDC_DECODER) { + fmt = &inst->fmts[OUTPUT_PORT]; + } else { + s_vpr_e(inst->sid, "%s: invalid session type %#x\n", + __func__, inst->session_type); + return; + } + + dcvs->min_threshold = fmt->count_min; + if (inst->session_type == MSM_VIDC_ENCODER) + dcvs->max_threshold = + min((fmt->count_min + DCVS_ENC_EXTRA_INPUT_BUFFERS), + fmt->count_actual); + else + dcvs->max_threshold = + min((fmt->count_min + DCVS_DEC_EXTRA_OUTPUT_BUFFERS), + fmt->count_actual); + + dcvs->dcvs_window = + dcvs->max_threshold < dcvs->min_threshold ? 0 : + dcvs->max_threshold - dcvs->min_threshold; + dcvs->nom_threshold = dcvs->min_threshold + + (dcvs->dcvs_window ? 
+ (dcvs->dcvs_window / 2) : 0); + + dcvs->dcvs_flags = 0; + + s_vpr_p(inst->sid, "DCVS: Th[%d %d %d] Flag %#x\n", + dcvs->min_threshold, + dcvs->nom_threshold, dcvs->max_threshold, + dcvs->dcvs_flags); + +} + +int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst) +{ + int rc = 0, j = 0; + int fourcc, count; + + if (!inst || !inst->core) { + d_vpr_e("%s: Invalid args: Inst = %pK\n", + __func__, inst); + return -EINVAL; + } + + count = inst->core->resources.codec_data_count; + fourcc = get_v4l2_codec(inst); + + for (j = 0; j < count; j++) { + if (inst->core->resources.codec_data[j].session_type == + inst->session_type && + inst->core->resources.codec_data[j].fourcc == + fourcc) { + inst->clk_data.entry = + &inst->core->resources.codec_data[j]; + break; + } + } + + if (!inst->clk_data.entry) { + s_vpr_e(inst->sid, "%s: No match found\n", __func__); + rc = -EINVAL; + } + + return rc; +} + +void msm_clock_data_reset(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core; + int i = 0, rc = 0; + struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + u64 total_freq = 0, rate = 0, load; + int cycles; + + if (!inst || !inst->core || !inst->clk_data.entry) { + d_vpr_e("%s: Invalid args: Inst = %pK\n", + __func__, inst); + return; + } + s_vpr_h(inst->sid, "Init DCVS Load\n"); + + core = inst->core; + load = msm_comm_get_inst_load_per_core(inst, LOAD_POWER); + cycles = inst->clk_data.entry->vpp_cycles; + allowed_clks_tbl = core->resources.allowed_clks_tbl; + if (inst->session_type == MSM_VIDC_ENCODER && + inst->flags & VIDC_LOW_POWER) + cycles = inst->clk_data.entry->low_power_cycles; + + msm_dcvs_reset(inst); + + total_freq = cycles * load; + + for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) { + rate = allowed_clks_tbl[i].clock_rate; + if (rate >= total_freq) + break; + } + + if (i < 0) + i = 0; + + inst->clk_data.buffer_counter = 0; + inst->ubwc_stats.is_valid = 0; + + rc = msm_comm_scale_clocks_and_bus(inst, 1); + + if (rc) + 
s_vpr_e(inst->sid, "%s: Failed to scale Clocks and Bus\n", + __func__); +} + +int msm_vidc_decide_work_route_iris2(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_video_work_route pdata; + bool is_legacy_cbr; + u32 codec; + uint32_t vpu; + + if (!inst || !inst->core || !inst->core->device || + !inst->core->platform_data) { + d_vpr_e("%s: Invalid args: Inst = %pK\n", + __func__, inst); + return -EINVAL; + } + + vpu = inst->core->platform_data->vpu_ver; + hdev = inst->core->device; + is_legacy_cbr = inst->clk_data.is_legacy_cbr; + pdata.video_work_route = inst->core->platform_data->num_vpp_pipes; + + if (vpu == VPU_VERSION_IRIS2_1) { + pdata.video_work_route = 1; + goto decision_done; + } + codec = get_v4l2_codec(inst); + if (inst->session_type == MSM_VIDC_DECODER) { + if (codec == V4L2_PIX_FMT_MPEG2 || + inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE) + pdata.video_work_route = 1; + } else if (inst->session_type == MSM_VIDC_ENCODER) { + u32 slice_mode, width, height; + struct v4l2_format *f; + + slice_mode = msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + height = f->fmt.pix_mp.height; + width = f->fmt.pix_mp.width; + + if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES || + is_legacy_cbr) { + pdata.video_work_route = 1; + } + } else { + return -EINVAL; + } + +decision_done: + s_vpr_h(inst->sid, "Configurng work route = %u", + pdata.video_work_route); + + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HFI_PROPERTY_PARAM_WORK_ROUTE, + (void *)&pdata, sizeof(pdata)); + if (rc) + s_vpr_e(inst->sid, "Failed to configure work route\n"); + else + inst->clk_data.work_route = pdata.video_work_route; + + return rc; +} + +static int msm_vidc_decide_work_mode_ar50_lt(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_video_work_mode pdata; + struct hfi_enable latency; + struct v4l2_format *f; + + if (!inst 
|| !inst->core || !inst->core->device) { + d_vpr_e("%s: Invalid args: Inst = %pK\n", + __func__, inst); + return -EINVAL; + } + + latency.enable = false; + hdev = inst->core->device; + if (inst->clk_data.low_latency_mode) { + pdata.video_work_mode = HFI_WORKMODE_1; + latency.enable = true; + goto decision_done; + } + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + if (inst->session_type == MSM_VIDC_DECODER) { + pdata.video_work_mode = HFI_WORKMODE_2; + switch (f->fmt.pix_mp.pixelformat) { + case V4L2_PIX_FMT_MPEG2: + pdata.video_work_mode = HFI_WORKMODE_1; + break; + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_HEVC: + if (f->fmt.pix_mp.height * + f->fmt.pix_mp.width <= 1280 * 720) + pdata.video_work_mode = HFI_WORKMODE_1; + break; + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + pdata.video_work_mode = HFI_WORKMODE_1; + /* For WORK_MODE_1, set Low Latency mode by default */ + latency.enable = true; + if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR || + inst->rc_type == + V4L2_MPEG_VIDEO_BITRATE_MODE_MBR || + inst->rc_type == + V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR || + inst->rc_type == + V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) { + pdata.video_work_mode = HFI_WORKMODE_2; + latency.enable = false; + } + } else { + return -EINVAL; + } + +decision_done: + s_vpr_h(inst->sid, "Configuring work mode = %u low latency = %u", + pdata.video_work_mode, latency.enable); + inst->clk_data.work_mode = pdata.video_work_mode; + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HFI_PROPERTY_PARAM_WORK_MODE, + (void *)&pdata, sizeof(pdata)); + if (rc) + s_vpr_e(inst->sid, "Failed to configure Work Mode\n"); + + if (inst->session_type == MSM_VIDC_ENCODER && latency.enable) { + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, + HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE, + (void *)&latency, sizeof(latency)); + } + rc = msm_comm_scale_clocks_and_bus(inst, 1); + + return rc; +} + +int msm_vidc_set_bse_vpp_delay(struct msm_vidc_inst *inst) +{ + 
int rc = 0; + struct hfi_device *hdev; + u32 delay = DEFAULT_BSE_VPP_DELAY; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + /* Set VPP delay only upto first reconfig */ + if (inst->first_reconfig_done) { + s_vpr_hp(inst->sid, "%s: Skip bse-vpp\n", __func__); + return 0; + } + + if (in_port_reconfig(inst)) + inst->first_reconfig_done = 1; + + if (!inst->core->resources.has_vpp_delay || + !is_decode_session(inst) || + is_thumbnail_session(inst) || + is_heif_decoder(inst) || + inst->clk_data.work_mode != HFI_WORKMODE_2) { + s_vpr_hp(inst->sid, "%s: Skip bse-vpp\n", __func__); + return 0; + } + + hdev = inst->core->device; + + if (is_vpp_delay_allowed(inst)) + delay = MAX_BSE_VPP_DELAY; + + /* DebugFS override [1-31] */ + if (msm_vidc_vpp_delay & 0x1F) + delay = msm_vidc_vpp_delay & 0x1F; + + s_vpr_hp(inst->sid, "%s: bse-vpp delay %u\n", __func__, delay); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_VDEC_VSP_VPP_DELAY, &delay, + sizeof(u32)); + if (rc) + s_vpr_e(inst->sid, "%s: set property failed\n", __func__); + else + inst->bse_vpp_delay = delay; + + return rc; +} + +int msm_vidc_decide_work_mode_iris2(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hfi_video_work_mode pdata; + struct hfi_enable latency; + u32 width, height; + bool res_ok = false; + struct v4l2_format *out_f; + struct v4l2_format *inp_f; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: Invalid args: Inst = %pK\n", + __func__, inst); + return -EINVAL; + } + + hdev = inst->core->device; + pdata.video_work_mode = HFI_WORKMODE_2; + latency.enable = inst->clk_data.low_latency_mode; + out_f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + inp_f = &inst->fmts[INPUT_PORT].v4l2_fmt; + if (inst->session_type == MSM_VIDC_DECODER) { + height = out_f->fmt.pix_mp.height; + width = out_f->fmt.pix_mp.width; + res_ok = res_is_less_than_or_equal_to(width, height, 1280, 
720); + if (inp_f->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_MPEG2 || + inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE || + inst->clk_data.low_latency_mode || res_ok) { + pdata.video_work_mode = HFI_WORKMODE_1; + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + height = inp_f->fmt.pix_mp.height; + width = inp_f->fmt.pix_mp.width; + res_ok = !res_is_greater_than(width, height, 4096, 2160); + if (res_ok && + (inst->clk_data.low_latency_mode)) { + pdata.video_work_mode = HFI_WORKMODE_1; + /* For WORK_MODE_1, set Low Latency mode by default */ + latency.enable = true; + } + if (inst->rc_type == RATE_CONTROL_LOSSLESS || inst->all_intra) { + pdata.video_work_mode = HFI_WORKMODE_2; + latency.enable = false; + } + } else { + return -EINVAL; + } + + s_vpr_h(inst->sid, "Configuring work mode = %u low latency = %u", + pdata.video_work_mode, latency.enable); + + if (inst->session_type == MSM_VIDC_ENCODER) { + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, + HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE, + (void *)&latency, sizeof(latency)); + if (rc) + s_vpr_e(inst->sid, "Failed to configure low latency\n"); + else + inst->clk_data.low_latency_mode = latency.enable; + } + + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HFI_PROPERTY_PARAM_WORK_MODE, + (void *)&pdata, sizeof(pdata)); + if (rc) + s_vpr_e(inst->sid, "Failed to configure Work Mode\n"); + else + inst->clk_data.work_mode = pdata.video_work_mode; + + return rc; +} + +static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst, + bool enable) +{ + u32 rc = 0; + u32 prop_id = 0; + void *pdata = NULL; + struct hfi_device *hdev = NULL; + u32 hfi_perf_mode; + struct v4l2_ctrl *ctrl; + + hdev = inst->core->device; + if (inst->session_type != MSM_VIDC_ENCODER) { + s_vpr_l(inst->sid, + "%s: Not an encoder session. 
Nothing to do\n", + __func__); + return 0; + } + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_COMPLEXITY); + if (!is_realtime_session(inst) && !ctrl->val) + enable = true; + prop_id = HFI_PROPERTY_CONFIG_VENC_PERF_MODE; + hfi_perf_mode = enable ? HFI_VENC_PERFMODE_POWER_SAVE : + HFI_VENC_PERFMODE_MAX_QUALITY; + pdata = &hfi_perf_mode; + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, prop_id, pdata, + sizeof(hfi_perf_mode)); + if (rc) { + s_vpr_e(inst->sid, "%s: Failed to set power save mode\n", + __func__); + return rc; + } + inst->flags = enable ? + inst->flags | VIDC_LOW_POWER : + inst->flags & ~VIDC_LOW_POWER; + + s_vpr_h(inst->sid, + "Power Save Mode for inst: %pK Enable = %d\n", inst, enable); + + return rc; +} + +int msm_vidc_decide_core_and_power_mode_ar50lt(struct msm_vidc_inst *inst) +{ + inst->clk_data.core_id = VIDC_CORE_ID_1; + return 0; +} + +int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst) +{ + u32 mbpf, mbps, max_hq_mbpf, max_hq_mbps; + bool enable = true; + int rc = 0; + + inst->clk_data.core_id = VIDC_CORE_ID_1; + + mbpf = msm_vidc_get_mbs_per_frame(inst); + mbps = mbpf * msm_vidc_get_fps(inst); + max_hq_mbpf = inst->core->resources.max_hq_mbs_per_frame; + max_hq_mbps = inst->core->resources.max_hq_mbs_per_sec; + + /* Power saving always disabled for CQ and LOSSLESS RC modes. 
*/ + if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ || + inst->rc_type == RATE_CONTROL_LOSSLESS || + (mbpf <= max_hq_mbpf && mbps <= max_hq_mbps)) + enable = false; + + rc = msm_vidc_power_save_mode_enable(inst, enable); + msm_print_core_status(inst->core, VIDC_CORE_ID_1, inst->sid); + + return rc; +} + +void msm_vidc_init_core_clk_ops(struct msm_vidc_core *core) +{ + uint32_t vpu; + + if (!core) + return; + + vpu = core->platform_data->vpu_ver; + + if (vpu == VPU_VERSION_AR50_LITE) + core->core_ops = &core_ops_ar50_lt; + else + core->core_ops = &core_ops_iris2; +} + +void msm_print_core_status(struct msm_vidc_core *core, u32 core_id, u32 sid) +{ + struct msm_vidc_inst *inst = NULL; + struct v4l2_format *out_f; + struct v4l2_format *inp_f; + + s_vpr_p(sid, "Instances running on core %u", core_id); + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if ((inst->clk_data.core_id != core_id) && + (inst->clk_data.core_id != VIDC_CORE_ID_3)) + continue; + out_f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + inp_f = &inst->fmts[INPUT_PORT].v4l2_fmt; + s_vpr_p(sid, + "inst %pK (%4ux%4u) to (%4ux%4u) %3u %s %s %u %s %s %lu\n", + inst, + inp_f->fmt.pix_mp.width, + inp_f->fmt.pix_mp.height, + out_f->fmt.pix_mp.width, + out_f->fmt.pix_mp.height, + inst->clk_data.frame_rate >> 16, + inst->session_type == MSM_VIDC_ENCODER ? "ENC" : "DEC", + inst->clk_data.work_mode == HFI_WORKMODE_1 ? + "WORK_MODE_1" : "WORK_MODE_2", + inst->clk_data.work_route, + inst->flags & VIDC_LOW_POWER ? "LP" : "HQ", + is_realtime_session(inst) ? "RealTime" : "NonRTime", + inst->clk_data.min_freq); + } + mutex_unlock(&core->lock); +} diff --git a/techpack/video/msm/vidc/msm_vidc_clocks.h b/techpack/video/msm/vidc/msm_vidc_clocks.h new file mode 100644 index 000000000000..30a2d40a039f --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_clocks.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. 
All rights reserved. + */ + +#ifndef _MSM_VIDC_CLOCKS_H_ +#define _MSM_VIDC_CLOCKS_H_ +#include "msm_vidc_internal.h" + +void msm_clock_data_reset(struct msm_vidc_inst *inst); +void msm_dcvs_reset(struct msm_vidc_inst *inst); +int msm_vidc_set_clocks(struct msm_vidc_core *core, u32 sid); +int msm_comm_vote_bus(struct msm_vidc_inst *inst); +int msm_dcvs_try_enable(struct msm_vidc_inst *inst); +bool res_is_less_than(u32 width, u32 height, u32 ref_width, u32 ref_height); +bool res_is_greater_than(u32 width, u32 height, u32 ref_width, u32 ref_height); +bool res_is_less_than_or_equal_to(u32 width, u32 height, + u32 ref_width, u32 ref_height); +bool res_is_greater_than_or_equal_to(u32 width, u32 height, + u32 ref_width, u32 ref_height); +int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst); +int msm_vidc_get_fps(struct msm_vidc_inst *inst); +int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst, bool do_bw_calc); +int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst); +int msm_vidc_decide_work_route_iris1(struct msm_vidc_inst *inst); +int msm_vidc_decide_work_mode_iris1(struct msm_vidc_inst *inst); +int msm_vidc_decide_work_route_iris2(struct msm_vidc_inst *inst); +int msm_vidc_decide_work_mode_iris2(struct msm_vidc_inst *inst); +int msm_vidc_decide_core_and_power_mode_ar50lt(struct msm_vidc_inst *inst); +int msm_vidc_decide_core_and_power_mode_iris1(struct msm_vidc_inst *inst); +int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst); +void msm_print_core_status(struct msm_vidc_core *core, u32 core_id, u32 sid); +void msm_comm_free_input_cr_table(struct msm_vidc_inst *inst); +void msm_comm_update_input_cr(struct msm_vidc_inst *inst, u32 index, + u32 cr); +void update_recon_stats(struct msm_vidc_inst *inst, + struct recon_stats_type *recon_stats); +void msm_vidc_init_core_clk_ops(struct msm_vidc_core *core); +bool res_is_greater_than(u32 width, u32 height, + u32 ref_width, u32 ref_height); +bool res_is_less_than(u32 width, u32 
height, + u32 ref_width, u32 ref_height); +int msm_vidc_set_bse_vpp_delay(struct msm_vidc_inst *inst); +bool is_vpp_delay_allowed(struct msm_vidc_inst *inst); +#endif diff --git a/techpack/video/msm/vidc/msm_vidc_common.c b/techpack/video/msm/vidc/msm_vidc_common.c new file mode 100644 index 000000000000..b8a97f53c778 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_common.c @@ -0,0 +1,8048 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include "msm_vidc_common.h" +#include "vidc_hfi_api.h" +#include "vidc_hfi.h" +#include "msm_vidc_debug.h" +#include "msm_vidc_clocks.h" +#include "msm_vidc_buffer_calculations.h" + +#define IS_ALREADY_IN_STATE(__p, __d) (\ + (__p >= __d)\ +) + +#define V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT \ + V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT +#define V4L2_EVENT_RELEASE_BUFFER_REFERENCE \ + V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE + +static void handle_session_error(enum hal_command_response cmd, void *data); +static void msm_vidc_print_running_insts(struct msm_vidc_core *core); + +#define V4L2_VP9_LEVEL_61 V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61 +#define TIMESTAMPS_WINDOW_SIZE 32 +#define SSR_TYPE 0x0000000F +#define SSR_TYPE_SHIFT 0 +#define SSR_SUB_CLIENT_ID 0x000000F0 +#define SSR_SUB_CLIENT_ID_SHIFT 4 +#define SSR_ADDR_ID 0xFFFFFFFF00000000 +#define SSR_ADDR_SHIFT 32 + +int msm_comm_g_ctrl_for_id(struct msm_vidc_inst *inst, int id) +{ + struct v4l2_ctrl *ctrl; + + ctrl = get_ctrl(inst, id); + return ctrl->val; +} + +int msm_comm_hfi_to_v4l2(int id, int value, u32 sid) +{ + switch (id) { + /* H264 */ + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + switch (value) { + case HFI_H264_PROFILE_BASELINE: + return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE; + case HFI_H264_PROFILE_CONSTRAINED_BASE: + return + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE; + case HFI_H264_PROFILE_MAIN: + return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN; + case 
HFI_H264_PROFILE_HIGH: + return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH; + case HFI_H264_PROFILE_STEREO_HIGH: + return V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH; + case HFI_H264_PROFILE_MULTIVIEW_HIGH: + return V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH; + case HFI_H264_PROFILE_CONSTRAINED_HIGH: + return V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH; + default: + goto unknown_value; + } + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + switch (value) { + case HFI_H264_LEVEL_1: + return V4L2_MPEG_VIDEO_H264_LEVEL_1_0; + case HFI_H264_LEVEL_1b: + return V4L2_MPEG_VIDEO_H264_LEVEL_1B; + case HFI_H264_LEVEL_11: + return V4L2_MPEG_VIDEO_H264_LEVEL_1_1; + case HFI_H264_LEVEL_12: + return V4L2_MPEG_VIDEO_H264_LEVEL_1_2; + case HFI_H264_LEVEL_13: + return V4L2_MPEG_VIDEO_H264_LEVEL_1_3; + case HFI_H264_LEVEL_2: + return V4L2_MPEG_VIDEO_H264_LEVEL_2_0; + case HFI_H264_LEVEL_21: + return V4L2_MPEG_VIDEO_H264_LEVEL_2_1; + case HFI_H264_LEVEL_22: + return V4L2_MPEG_VIDEO_H264_LEVEL_2_2; + case HFI_H264_LEVEL_3: + return V4L2_MPEG_VIDEO_H264_LEVEL_3_0; + case HFI_H264_LEVEL_31: + return V4L2_MPEG_VIDEO_H264_LEVEL_3_1; + case HFI_H264_LEVEL_32: + return V4L2_MPEG_VIDEO_H264_LEVEL_3_2; + case HFI_H264_LEVEL_4: + return V4L2_MPEG_VIDEO_H264_LEVEL_4_0; + case HFI_H264_LEVEL_41: + return V4L2_MPEG_VIDEO_H264_LEVEL_4_1; + case HFI_H264_LEVEL_42: + return V4L2_MPEG_VIDEO_H264_LEVEL_4_2; + case HFI_H264_LEVEL_5: + return V4L2_MPEG_VIDEO_H264_LEVEL_5_0; + case HFI_H264_LEVEL_51: + return V4L2_MPEG_VIDEO_H264_LEVEL_5_1; + case HFI_H264_LEVEL_52: + return V4L2_MPEG_VIDEO_H264_LEVEL_5_2; + case HFI_H264_LEVEL_6: + return V4L2_MPEG_VIDEO_H264_LEVEL_6_0; + case HFI_H264_LEVEL_61: + return V4L2_MPEG_VIDEO_H264_LEVEL_6_1; + case HFI_H264_LEVEL_62: + return V4L2_MPEG_VIDEO_H264_LEVEL_6_2; + default: + goto unknown_value; + } + + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + switch (value) { + case HFI_H264_ENTROPY_CAVLC: + return V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC; + case HFI_H264_ENTROPY_CABAC: + return 
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC; + default: + goto unknown_value; + } + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + switch (value) { + case HFI_HEVC_PROFILE_MAIN: + return V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN; + case HFI_HEVC_PROFILE_MAIN10: + return V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10; + case HFI_HEVC_PROFILE_MAIN_STILL_PIC: + return V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE; + default: + goto unknown_value; + } + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + switch (value) { + case HFI_HEVC_LEVEL_1: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_1; + case HFI_HEVC_LEVEL_2: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_2; + case HFI_HEVC_LEVEL_21: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1; + case HFI_HEVC_LEVEL_3: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_3; + case HFI_HEVC_LEVEL_31: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1; + case HFI_HEVC_LEVEL_4: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_4; + case HFI_HEVC_LEVEL_41: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1; + case HFI_HEVC_LEVEL_5: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_5; + case HFI_HEVC_LEVEL_51: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1; + case HFI_HEVC_LEVEL_52: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2; + case HFI_HEVC_LEVEL_6: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_6; + case HFI_HEVC_LEVEL_61: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1; + case HFI_HEVC_LEVEL_62: + return V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2; + default: + goto unknown_value; + } + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + switch (value) { + case HFI_VP9_PROFILE_P0: + return V4L2_MPEG_VIDEO_VP9_PROFILE_0; + case HFI_VP9_PROFILE_P2_10B: + return V4L2_MPEG_VIDEO_VP9_PROFILE_2; + default: + goto unknown_value; + } + case V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL: + switch (value) { + case HFI_VP9_LEVEL_1: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_1; + case HFI_VP9_LEVEL_11: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_11; + case HFI_VP9_LEVEL_2: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_2; + case HFI_VP9_LEVEL_21: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_21; + case HFI_VP9_LEVEL_3: + return 
V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_3; + case HFI_VP9_LEVEL_31: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_31; + case HFI_VP9_LEVEL_4: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_4; + case HFI_VP9_LEVEL_41: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_41; + case HFI_VP9_LEVEL_5: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5; + case HFI_VP9_LEVEL_51: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51; + case HFI_VP9_LEVEL_6: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6; + case HFI_VP9_LEVEL_61: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61; + case HFI_LEVEL_UNKNOWN: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED; + default: + goto unknown_value; + } + case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE: + switch (value) { + case HFI_MPEG2_PROFILE_SIMPLE: + return V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE; + case HFI_MPEG2_PROFILE_MAIN: + return V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN; + default: + goto unknown_value; + } + case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL: + /* This mapping is not defined properly in V4L2 */ + switch (value) { + case HFI_MPEG2_LEVEL_LL: + return V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0; + case HFI_MPEG2_LEVEL_ML: + return V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_1; + case HFI_MPEG2_LEVEL_HL: + return V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2; + default: + goto unknown_value; + } + } + +unknown_value: + s_vpr_e(sid, "Unknown control (%x, %d)\n", id, value); + return -EINVAL; +} + +static int h264_level_v4l2_to_hfi(int value, u32 sid) +{ + switch (value) { + case V4L2_MPEG_VIDEO_H264_LEVEL_1_0: + return HFI_H264_LEVEL_1; + case V4L2_MPEG_VIDEO_H264_LEVEL_1B: + return HFI_H264_LEVEL_1b; + case V4L2_MPEG_VIDEO_H264_LEVEL_1_1: + return HFI_H264_LEVEL_11; + case V4L2_MPEG_VIDEO_H264_LEVEL_1_2: + return HFI_H264_LEVEL_12; + case V4L2_MPEG_VIDEO_H264_LEVEL_1_3: + return HFI_H264_LEVEL_13; + case V4L2_MPEG_VIDEO_H264_LEVEL_2_0: + return HFI_H264_LEVEL_2; + case V4L2_MPEG_VIDEO_H264_LEVEL_2_1: + return HFI_H264_LEVEL_21; + case V4L2_MPEG_VIDEO_H264_LEVEL_2_2: + return HFI_H264_LEVEL_22; + case 
V4L2_MPEG_VIDEO_H264_LEVEL_3_0: + return HFI_H264_LEVEL_3; + case V4L2_MPEG_VIDEO_H264_LEVEL_3_1: + return HFI_H264_LEVEL_31; + case V4L2_MPEG_VIDEO_H264_LEVEL_3_2: + return HFI_H264_LEVEL_32; + case V4L2_MPEG_VIDEO_H264_LEVEL_4_0: + return HFI_H264_LEVEL_4; + case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: + return HFI_H264_LEVEL_41; + case V4L2_MPEG_VIDEO_H264_LEVEL_4_2: + return HFI_H264_LEVEL_42; + case V4L2_MPEG_VIDEO_H264_LEVEL_5_0: + return HFI_H264_LEVEL_5; + case V4L2_MPEG_VIDEO_H264_LEVEL_5_1: + return HFI_H264_LEVEL_51; + case V4L2_MPEG_VIDEO_H264_LEVEL_5_2: + return HFI_H264_LEVEL_52; + case V4L2_MPEG_VIDEO_H264_LEVEL_6_0: + return HFI_H264_LEVEL_6; + case V4L2_MPEG_VIDEO_H264_LEVEL_6_1: + return HFI_H264_LEVEL_61; + case V4L2_MPEG_VIDEO_H264_LEVEL_6_2: + return HFI_H264_LEVEL_62; + default: + goto unknown_value; + } + +unknown_value: + s_vpr_e(sid, "Unknown level (%d)\n", value); + return -EINVAL; +} + +static int hevc_level_v4l2_to_hfi(int value, u32 sid) +{ + switch (value) { + case V4L2_MPEG_VIDEO_HEVC_LEVEL_1: + return HFI_HEVC_LEVEL_1; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_2: + return HFI_HEVC_LEVEL_2; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1: + return HFI_HEVC_LEVEL_21; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_3: + return HFI_HEVC_LEVEL_3; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1: + return HFI_HEVC_LEVEL_31; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_4: + return HFI_HEVC_LEVEL_4; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1: + return HFI_HEVC_LEVEL_41; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_5: + return HFI_HEVC_LEVEL_5; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1: + return HFI_HEVC_LEVEL_51; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2: + return HFI_HEVC_LEVEL_52; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_6: + return HFI_HEVC_LEVEL_6; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1: + return HFI_HEVC_LEVEL_61; + case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2: + return HFI_HEVC_LEVEL_62; + default: + goto unknown_value; + } + +unknown_value: + s_vpr_e(sid, "Unknown level (%d)\n", value); + return -EINVAL; +} + +static int 
vp9_level_v4l2_to_hfi(int value, u32 sid) +{ + switch (value) { + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_1: + return HFI_VP9_LEVEL_1; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_11: + return HFI_VP9_LEVEL_11; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_2: + return HFI_VP9_LEVEL_2; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_21: + return HFI_VP9_LEVEL_21; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_3: + return HFI_VP9_LEVEL_3; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_31: + return HFI_VP9_LEVEL_31; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_4: + return HFI_VP9_LEVEL_4; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_41: + return HFI_VP9_LEVEL_41; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5: + return HFI_VP9_LEVEL_5; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51: + return HFI_VP9_LEVEL_51; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6: + return HFI_VP9_LEVEL_6; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61: + return HFI_VP9_LEVEL_61; + case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED: + return HFI_LEVEL_UNKNOWN; + default: + goto unknown_value; + } + +unknown_value: + s_vpr_e(sid, "Unknown level (%d)\n", value); + return -EINVAL; +} + +int msm_comm_v4l2_to_hfi(int id, int value, u32 sid) +{ + switch (id) { + /* H264 */ + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + switch (value) { + case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE: + return HFI_H264_PROFILE_BASELINE; + case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE: + return HFI_H264_PROFILE_CONSTRAINED_BASE; + case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN: + return HFI_H264_PROFILE_MAIN; + case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH: + return HFI_H264_PROFILE_HIGH; + case V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH: + return HFI_H264_PROFILE_STEREO_HIGH; + case V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH: + return HFI_H264_PROFILE_MULTIVIEW_HIGH; + case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH: + return HFI_H264_PROFILE_CONSTRAINED_HIGH; + default: + return HFI_H264_PROFILE_HIGH; + } + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + return h264_level_v4l2_to_hfi(value, sid); + case 
V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + switch (value) { + case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC: + return HFI_H264_ENTROPY_CAVLC; + case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC: + return HFI_H264_ENTROPY_CABAC; + default: + return HFI_H264_ENTROPY_CABAC; + } + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + switch (value) { + case V4L2_MPEG_VIDEO_VP9_PROFILE_0: + return HFI_VP9_PROFILE_P0; + case V4L2_MPEG_VIDEO_VP9_PROFILE_2: + return HFI_VP9_PROFILE_P2_10B; + default: + return HFI_VP9_PROFILE_P0; + } + case V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL: + return vp9_level_v4l2_to_hfi(value, sid); + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + switch (value) { + case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN: + return HFI_HEVC_PROFILE_MAIN; + case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10: + return HFI_HEVC_PROFILE_MAIN10; + case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE: + return HFI_HEVC_PROFILE_MAIN_STILL_PIC; + default: + return HFI_HEVC_PROFILE_MAIN; + } + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + return hevc_level_v4l2_to_hfi(value, sid); + case V4L2_CID_MPEG_VIDEO_HEVC_TIER: + switch (value) { + case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN: + return HFI_HEVC_TIER_MAIN; + case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH: + return HFI_HEVC_TIER_HIGH; + default: + return HFI_HEVC_TIER_HIGH; + } + case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE: + switch (value) { + case V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE: + return HFI_MPEG2_PROFILE_SIMPLE; + case V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN: + return HFI_MPEG2_PROFILE_MAIN; + default: + return HFI_MPEG2_PROFILE_MAIN; + } + case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL: + /* This mapping is not defined properly in V4L2 */ + switch (value) { + case V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0: + return HFI_MPEG2_LEVEL_LL; + case V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_1: + return HFI_MPEG2_LEVEL_ML; + case V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2: + return HFI_MPEG2_LEVEL_HL; + default: + return HFI_MPEG2_LEVEL_HL; + } + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + switch (value) 
{ + case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED: + return HFI_H264_DB_MODE_DISABLE; + case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED: + return HFI_H264_DB_MODE_ALL_BOUNDARY; + case DB_DISABLE_SLICE_BOUNDARY: + return HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY; + default: + return HFI_H264_DB_MODE_ALL_BOUNDARY; + } + } + s_vpr_e(sid, "Unknown control (%x, %d)\n", id, value); + return -EINVAL; +} + +int msm_comm_get_v4l2_profile(int fourcc, int profile, u32 sid) +{ + switch (fourcc) { + case V4L2_PIX_FMT_H264: + return msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_H264_PROFILE, + profile, sid); + case V4L2_PIX_FMT_HEVC: + return msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_HEVC_PROFILE, + profile, sid); + case V4L2_PIX_FMT_VP9: + case V4L2_PIX_FMT_MPEG2: + return 0; + default: + s_vpr_e(sid, "Unknown codec id %x\n", fourcc); + return 0; + } +} + +int msm_comm_get_v4l2_level(int fourcc, int level, u32 sid) +{ + switch (fourcc) { + case V4L2_PIX_FMT_H264: + return msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + level, sid); + case V4L2_PIX_FMT_HEVC: + level &= ~(0xF << 28); + return msm_comm_hfi_to_v4l2( + V4L2_CID_MPEG_VIDEO_HEVC_LEVEL, + level, sid); + case V4L2_PIX_FMT_VP9: + case V4L2_PIX_FMT_MPEG2: + return 0; + default: + s_vpr_e(sid, "Unknown codec id %x\n", fourcc); + return 0; + } +} + +static bool is_priv_ctrl(u32 id) +{ + if (IS_PRIV_CTRL(id)) + return true; + + /* + * Treat below standard controls as private because + * we have added custom values to the controls + */ + switch (id) { + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + return true; + } + + return false; +} + +int msm_comm_ctrl_init(struct msm_vidc_inst *inst, + struct msm_vidc_ctrl *drv_ctrls, u32 num_ctrls, + const struct v4l2_ctrl_ops *ctrl_ops) +{ + int idx = 0; + struct v4l2_ctrl_config ctrl_cfg = {0}; + int ret_val = 0; + + if (!inst || !drv_ctrls || !ctrl_ops || !num_ctrls) { + d_vpr_e("%s: invalid 
input\n", __func__); + return -EINVAL; + } + + inst->ctrls = kcalloc(num_ctrls, sizeof(struct v4l2_ctrl *), + GFP_KERNEL); + if (!inst->ctrls) { + s_vpr_e(inst->sid, "%s: failed to allocate ctrl\n", __func__); + return -ENOMEM; + } + + ret_val = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls); + + if (ret_val) { + s_vpr_e(inst->sid, "Control handler init failed, %d\n", + inst->ctrl_handler.error); + return ret_val; + } + + for (; idx < (int) num_ctrls; idx++) { + struct v4l2_ctrl *ctrl = NULL; + + if (is_priv_ctrl(drv_ctrls[idx].id)) { + /*add private control*/ + ctrl_cfg.def = drv_ctrls[idx].default_value; + ctrl_cfg.flags = 0; + ctrl_cfg.id = drv_ctrls[idx].id; + ctrl_cfg.max = drv_ctrls[idx].maximum; + ctrl_cfg.min = drv_ctrls[idx].minimum; + ctrl_cfg.menu_skip_mask = + drv_ctrls[idx].menu_skip_mask; + ctrl_cfg.name = drv_ctrls[idx].name; + ctrl_cfg.ops = ctrl_ops; + ctrl_cfg.step = drv_ctrls[idx].step; + ctrl_cfg.type = drv_ctrls[idx].type; + ctrl_cfg.qmenu = drv_ctrls[idx].qmenu; + + ctrl = v4l2_ctrl_new_custom(&inst->ctrl_handler, + &ctrl_cfg, NULL); + } else { + if (drv_ctrls[idx].type == V4L2_CTRL_TYPE_MENU) { + ctrl = v4l2_ctrl_new_std_menu( + &inst->ctrl_handler, + ctrl_ops, + drv_ctrls[idx].id, + (u8) drv_ctrls[idx].maximum, + drv_ctrls[idx].menu_skip_mask, + (u8) drv_ctrls[idx].default_value); + } else { + ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, + ctrl_ops, + drv_ctrls[idx].id, + drv_ctrls[idx].minimum, + drv_ctrls[idx].maximum, + drv_ctrls[idx].step, + drv_ctrls[idx].default_value); + } + } + + if (!ctrl) { + s_vpr_e(inst->sid, "%s: invalid ctrl %s\n", __func__, + drv_ctrls[idx].name); + return -EINVAL; + } + + ret_val = inst->ctrl_handler.error; + if (ret_val) { + s_vpr_e(inst->sid, + "Error adding ctrl (%s) to ctrl handle, %d\n", + drv_ctrls[idx].name, inst->ctrl_handler.error); + return ret_val; + } + + ctrl->flags |= drv_ctrls[idx].flags; + ctrl->flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + inst->ctrls[idx] = ctrl; + } + inst->num_ctrls 
= num_ctrls; + + return ret_val; +} + +int msm_comm_ctrl_deinit(struct msm_vidc_inst *inst) +{ + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + kfree(inst->ctrls); + v4l2_ctrl_handler_free(&inst->ctrl_handler); + + return 0; +} + +int msm_comm_set_stream_output_mode(struct msm_vidc_inst *inst, + enum multi_stream mode) +{ + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + if (!is_decode_session(inst)) { + s_vpr_h(inst->sid, "%s: not a decode session\n", __func__); + return -EINVAL; + } + + if (mode == HAL_VIDEO_DECODER_SECONDARY) + inst->stream_output_mode = HAL_VIDEO_DECODER_SECONDARY; + else + inst->stream_output_mode = HAL_VIDEO_DECODER_PRIMARY; + + return 0; +} + +enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst) +{ + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return HAL_VIDEO_DECODER_PRIMARY; + } + + if (!is_decode_session(inst)) + return HAL_VIDEO_DECODER_PRIMARY; + + if (inst->stream_output_mode == HAL_VIDEO_DECODER_SECONDARY) + return HAL_VIDEO_DECODER_SECONDARY; + else + return HAL_VIDEO_DECODER_PRIMARY; +} + +bool vidc_scalar_enabled(struct msm_vidc_inst *inst) +{ + struct v4l2_format *f; + u32 output_height, output_width, input_height, input_width; + bool scalar_enable = false; + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + output_height = f->fmt.pix_mp.height; + output_width = f->fmt.pix_mp.width; + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + input_height = f->fmt.pix_mp.height; + input_width = f->fmt.pix_mp.width; + + if (output_height != input_height || output_width != input_width) + scalar_enable = true; + + return scalar_enable; +} + +bool is_single_session(struct msm_vidc_inst *inst, u32 ignore_flags) +{ + bool single = true; + struct msm_vidc_core *core; + struct msm_vidc_inst *temp; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return false; + } + core = inst->core; + + 
mutex_lock(&core->lock); + list_for_each_entry(temp, &core->instances, list) { + /* ignore invalid session */ + if (temp->state == MSM_VIDC_CORE_INVALID) + continue; + if ((ignore_flags & VIDC_THUMBNAIL) && + is_thumbnail_session(temp)) + continue; + if (temp != inst) { + single = false; + break; + } + } + mutex_unlock(&core->lock); + + return single; +} + +static int msm_comm_get_mbs_per_sec(struct msm_vidc_inst *inst, + enum load_calc_quirks quirks) +{ + int input_port_mbs, output_port_mbs; + int fps; + struct v4l2_format *f; + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + input_port_mbs = NUM_MBS_PER_FRAME(f->fmt.pix_mp.width, + f->fmt.pix_mp.height); + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + output_port_mbs = NUM_MBS_PER_FRAME(f->fmt.pix_mp.width, + f->fmt.pix_mp.height); + + fps = inst->clk_data.frame_rate; + + /* For admission control operating rate is ignored */ + if (quirks == LOAD_POWER) + fps = max(inst->clk_data.operating_rate, + inst->clk_data.frame_rate); + + /* In case of fps < 1 we assume 1 */ + fps = max(fps >> 16, 1); + + return max(input_port_mbs, output_port_mbs) * fps; +} + +int msm_comm_get_inst_load(struct msm_vidc_inst *inst, + enum load_calc_quirks quirks) +{ + int load = 0; + + mutex_lock(&inst->lock); + + if (!(inst->state >= MSM_VIDC_OPEN_DONE && + inst->state < MSM_VIDC_STOP_DONE)) + goto exit; + + /* Clock and Load calculations for REALTIME/NON-REALTIME + * Operating rate will either Default or Client value. + * Session admission control will be based on Load. + * Power requests based of calculated Clock/Freq. 
+ * ----------------|----------------------------| + * REALTIME | Admission Control Load = | + * | res * fps | + * | Power Request Load = | + * | res * max(op, fps)| + * ----------------|----------------------------| + * NON-REALTIME | Admission Control Load = 0 | + * | Power Request Load = | + * | res * max(op, fps)| + * ----------------|----------------------------| + * THUMBNAIL | Always Load = 0 | + * | Perf mode added for | + * | thumbnail session buffers | + * | for faster decoding. | + * ----------------|----------------------------| + */ + + if (is_thumbnail_session(inst) || + (!is_realtime_session(inst) && + quirks == LOAD_ADMISSION_CONTROL)) { + load = 0; + } else { + load = msm_comm_get_mbs_per_sec(inst, quirks); + } + +exit: + mutex_unlock(&inst->lock); + return load; +} + +int msm_comm_get_inst_load_per_core(struct msm_vidc_inst *inst, + enum load_calc_quirks quirks) +{ + int load = msm_comm_get_inst_load(inst, quirks); + + if (inst->clk_data.core_id == VIDC_CORE_ID_3) + load = load / 2; + + return load; +} + +int msm_comm_get_device_load(struct msm_vidc_core *core, + enum session_type sess_type, enum load_type load_type, + enum load_calc_quirks quirks) +{ + struct msm_vidc_inst *inst = NULL; + int num_mbs_per_sec = 0; + + if (!core) { + d_vpr_e("Invalid args: %pK\n", core); + return -EINVAL; + } + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (inst->session_type != sess_type) + continue; + + if (load_type == MSM_VIDC_VIDEO && !is_video_session(inst)) + continue; + else if (load_type == MSM_VIDC_IMAGE && !is_grid_session(inst)) + continue; + + num_mbs_per_sec += msm_comm_get_inst_load(inst, quirks); + } + mutex_unlock(&core->lock); + + return num_mbs_per_sec; +} + +enum hal_domain get_hal_domain(int session_type, u32 sid) +{ + enum hal_domain domain; + + switch (session_type) { + case MSM_VIDC_ENCODER: + domain = HAL_VIDEO_DOMAIN_ENCODER; + break; + case MSM_VIDC_DECODER: + domain = HAL_VIDEO_DOMAIN_DECODER; + 
break; + default: + s_vpr_e(sid, "Wrong domain %d\n", session_type); + domain = HAL_UNUSED_DOMAIN; + break; + } + + return domain; +} + +enum hal_video_codec get_hal_codec(int fourcc, u32 sid) +{ + enum hal_video_codec codec; + + switch (fourcc) { + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_H264_NO_SC: + codec = HAL_VIDEO_CODEC_H264; + break; + case V4L2_PIX_FMT_H264_MVC: + codec = HAL_VIDEO_CODEC_MVC; + break; + case V4L2_PIX_FMT_MPEG1: + codec = HAL_VIDEO_CODEC_MPEG1; + break; + case V4L2_PIX_FMT_MPEG2: + codec = HAL_VIDEO_CODEC_MPEG2; + break; + case V4L2_PIX_FMT_VP9: + codec = HAL_VIDEO_CODEC_VP9; + break; + case V4L2_PIX_FMT_HEVC: + codec = HAL_VIDEO_CODEC_HEVC; + break; + default: + s_vpr_e(sid, "Wrong codec: %#x\n", fourcc); + codec = HAL_UNUSED_CODEC; + break; + } + + return codec; +} + +u32 msm_comm_get_hfi_uncompressed(int fourcc, u32 sid) +{ + u32 format; + + switch (fourcc) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV12_128: + case V4L2_PIX_FMT_NV12_512: + format = HFI_COLOR_FORMAT_NV12; + break; + case V4L2_PIX_FMT_NV21: + format = HFI_COLOR_FORMAT_NV21; + break; + case V4L2_PIX_FMT_NV12_UBWC: + format = HFI_COLOR_FORMAT_NV12_UBWC; + break; + case V4L2_PIX_FMT_NV12_TP10_UBWC: + format = HFI_COLOR_FORMAT_YUV420_TP10_UBWC; + break; + case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS: + format = HFI_COLOR_FORMAT_P010; + break; + case V4L2_PIX_FMT_RGBA8888_UBWC: + format = HFI_COLOR_FORMAT_RGBA8888_UBWC; + break; + default: + format = HFI_COLOR_FORMAT_NV12_UBWC; + s_vpr_e(sid, "Invalid format, defaulting to UBWC"); + break; + } + + return format; +} +struct msm_vidc_core *get_vidc_core(int core_id) +{ + struct msm_vidc_core *core; + int found = 0; + + if (core_id > MSM_VIDC_CORES_MAX) { + d_vpr_e("Core id = %d is greater than max = %d\n", + core_id, MSM_VIDC_CORES_MAX); + return NULL; + } + mutex_lock(&vidc_driver->lock); + list_for_each_entry(core, &vidc_driver->cores, list) { + if (core->id == core_id) { + found = 1; + break; + } + } + 
mutex_unlock(&vidc_driver->lock); + if (found) + return core; + return NULL; +} + +const struct msm_vidc_format_desc *msm_comm_get_pixel_fmt_index( + const struct msm_vidc_format_desc fmt[], int size, int index, u32 sid) +{ + int i, k = 0; + + if (!fmt || index < 0) { + s_vpr_e(sid, "Invalid inputs, fmt = %pK, index = %d\n", + fmt, index); + return NULL; + } + for (i = 0; i < size; i++) { + if (k == index) + break; + k++; + } + if (i == size) { + s_vpr_h(sid, "Format not found\n"); + return NULL; + } + return &fmt[i]; +} +struct msm_vidc_format_desc *msm_comm_get_pixel_fmt_fourcc( + struct msm_vidc_format_desc fmt[], int size, int fourcc, u32 sid) +{ + int i; + + if (!fmt) { + s_vpr_e(sid, "Invalid inputs, fmt = %pK\n", fmt); + return NULL; + } + for (i = 0; i < size; i++) { + if (fmt[i].fourcc == fourcc) + break; + } + if (i == size) { + s_vpr_h(sid, "Format not found\n"); + return NULL; + } + return &fmt[i]; +} + +struct msm_vidc_format_constraint *msm_comm_get_pixel_fmt_constraints( + struct msm_vidc_format_constraint fmt[], int size, int fourcc, u32 sid) +{ + int i; + + if (!fmt) { + s_vpr_e(sid, "Invalid inputs, fmt = %pK\n", fmt); + return NULL; + } + for (i = 0; i < size; i++) { + if (fmt[i].fourcc == fourcc) + break; + } + if (i == size) { + s_vpr_h(sid, "Format constraint not found.\n"); + return NULL; + } + return &fmt[i]; +} + +struct buf_queue *msm_comm_get_vb2q( + struct msm_vidc_inst *inst, enum v4l2_buf_type type) +{ + if (type == OUTPUT_MPLANE) + return &inst->bufq[OUTPUT_PORT]; + if (type == INPUT_MPLANE) + return &inst->bufq[INPUT_PORT]; + return NULL; +} + +static void update_capability(struct msm_vidc_codec_capability *in, + struct msm_vidc_capability *capability) +{ + if (!in || !capability) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, in, capability); + return; + } + if (in->capability_type < CAP_MAX) { + capability->cap[in->capability_type].capability_type = + in->capability_type; + capability->cap[in->capability_type].min = 
in->min; + capability->cap[in->capability_type].max = in->max; + capability->cap[in->capability_type].step_size = in->step_size; + capability->cap[in->capability_type].default_value = + in->default_value; + } else { + d_vpr_e("%s: invalid capability_type %d\n", + __func__, in->capability_type); + } +} + +static int msm_vidc_capabilities(struct msm_vidc_core *core) +{ + int rc = 0; + struct msm_vidc_codec_capability *platform_caps; + int i, j, num_platform_caps; + + if (!core || !core->capabilities) { + d_vpr_e("%s: invalid params %pK\n", __func__, core); + return -EINVAL; + } + platform_caps = core->resources.codec_caps; + num_platform_caps = core->resources.codec_caps_count; + + d_vpr_h("%s: num caps %d\n", __func__, num_platform_caps); + /* loop over each platform capability */ + for (i = 0; i < num_platform_caps; i++) { + /* select matching core codec and update it */ + for (j = 0; j < core->resources.codecs_count; j++) { + if ((platform_caps[i].domains & + core->capabilities[j].domain) && + (platform_caps[i].codecs & + core->capabilities[j].codec)) { + /* update core capability */ + update_capability(&platform_caps[i], + &core->capabilities[j]); + } + } + } + + return rc; +} + +static void handle_sys_init_done(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_core *core; + + if (!IS_HAL_SYS_CMD(cmd)) { + d_vpr_e("%s: invalid cmd\n", __func__); + return; + } + if (!response) { + d_vpr_e("Failed to get valid response for sys init\n"); + return; + } + core = get_vidc_core(response->device_id); + if (!core) { + d_vpr_e("Wrong device_id received\n"); + return; + } + d_vpr_l("handled: SYS_INIT_DONE\n"); + complete(&(core->completions[SYS_MSG_INDEX(cmd)])); +} + +static void put_inst_helper(struct kref *kref) +{ + struct msm_vidc_inst *inst = container_of(kref, + struct msm_vidc_inst, kref); + + msm_vidc_destroy(inst); +} + +void put_inst(struct msm_vidc_inst *inst) +{ + if (!inst) + return; + + 
kref_put(&inst->kref, put_inst_helper); +} + +struct msm_vidc_inst *get_inst(struct msm_vidc_core *core, + void *inst_id) +{ + struct msm_vidc_inst *inst = NULL; + bool matches = false; + + if (!core || !inst_id) + return NULL; + + mutex_lock(&core->lock); + /* + * This is as good as !list_empty(!inst->list), but at this point + * we don't really know if inst was kfree'd via close syscall before + * hardware could respond. So manually walk thru the list of active + * sessions + */ + list_for_each_entry(inst, &core->instances, list) { + if (inst == inst_id) { + /* + * Even if the instance is valid, we really shouldn't + * be receiving or handling callbacks when we've deleted + * our session with HFI + */ + matches = !!inst->session; + break; + } + } + + /* + * kref_* is atomic_int backed, so no need for inst->lock. But we can + * always acquire inst->lock and release it in put_inst for a stronger + * locking system. + */ + inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL; + mutex_unlock(&core->lock); + + return inst; +} + +static void handle_session_release_buf_done(enum hal_command_response cmd, + void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_inst *inst; + struct internal_buf *buf; + struct list_head *ptr, *next; + struct hal_buffer_info *buffer; + u32 buf_found = false; + u32 address; + + if (!response) { + d_vpr_e("Invalid release_buf_done response\n"); + return; + } + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + buffer = &response->data.buffer_info; + address = buffer->buffer_addr; + + mutex_lock(&inst->scratchbufs.lock); + list_for_each_safe(ptr, next, &inst->scratchbufs.list) { + buf = list_entry(ptr, struct internal_buf, list); + if (address == buf->smem.device_addr) { + s_vpr_h(inst->sid, "releasing scratch: %x\n", + buf->smem.device_addr); + buf_found = true; + } + } + 
mutex_unlock(&inst->scratchbufs.lock); + + mutex_lock(&inst->persistbufs.lock); + list_for_each_safe(ptr, next, &inst->persistbufs.list) { + buf = list_entry(ptr, struct internal_buf, list); + if (address == buf->smem.device_addr) { + s_vpr_h(inst->sid, "releasing persist: %x\n", + buf->smem.device_addr); + buf_found = true; + } + } + mutex_unlock(&inst->persistbufs.lock); + + if (!buf_found) + s_vpr_e(inst->sid, "invalid buffer received from firmware"); + if (IS_HAL_SESSION_CMD(cmd)) + complete(&inst->completions[SESSION_MSG_INDEX(cmd)]); + else + s_vpr_e(inst->sid, "Invalid inst cmd response: %d\n", cmd); + + s_vpr_l(inst->sid, "handled: SESSION_RELEASE_BUFFER_DONE\n"); + put_inst(inst); +} + +static void handle_sys_release_res_done( + enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_core *core; + + if (!response) { + d_vpr_e("Failed to get valid response for sys init\n"); + return; + } + core = get_vidc_core(response->device_id); + if (!core) { + d_vpr_e("Wrong device_id received\n"); + return; + } + d_vpr_l("handled: SYS_RELEASE_RESOURCE_DONE\n"); + complete(&core->completions[ + SYS_MSG_INDEX(HAL_SYS_RELEASE_RESOURCE_DONE)]); +} + +void change_inst_state(struct msm_vidc_inst *inst, enum instance_state state) +{ + if (!inst) { + d_vpr_e("Invalid parameter %s\n", __func__); + return; + } + mutex_lock(&inst->lock); + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_h(inst->sid, + "Inst: %pK is in bad state can't change state to %d\n", + inst, state); + goto exit; + } + s_vpr_h(inst->sid, "Moved inst: %pK from state: %d to state: %d\n", + inst, inst->state, state); + inst->state = state; +exit: + mutex_unlock(&inst->lock); +} + +static int signal_session_msg_receipt(enum hal_command_response cmd, + struct msm_vidc_inst *inst) +{ + if (!inst) { + d_vpr_e("Invalid(%pK) instance id\n", inst); + return -EINVAL; + } + if (IS_HAL_SESSION_CMD(cmd)) { + complete(&inst->completions[SESSION_MSG_INDEX(cmd)]); 
+ } else { + s_vpr_e(inst->sid, "Invalid inst cmd response: %d\n", cmd); + return -EINVAL; + } + return 0; +} + +static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst, + enum hal_command_response cmd) +{ + int rc = 0; + struct hfi_device *hdev; + + if (!inst) { + d_vpr_e("Invalid(%pK) instance id\n", inst); + return -EINVAL; + } + if (!IS_HAL_SESSION_CMD(cmd)) { + s_vpr_e(inst->sid, "Invalid inst cmd response: %d\n", cmd); + return -EINVAL; + } + hdev = (struct hfi_device *)(inst->core->device); + rc = wait_for_completion_timeout( + &inst->completions[SESSION_MSG_INDEX(cmd)], + msecs_to_jiffies( + inst->core->resources.msm_vidc_hw_rsp_timeout)); + if (!rc) { + s_vpr_e(inst->sid, "Wait interrupted or timed out(sending ping cmd): %d\n", + SESSION_MSG_INDEX(cmd)); + rc = call_hfi_op(hdev, core_ping, hdev->hfi_device_data, inst->sid); + rc = wait_for_completion_timeout( + &inst->core->completions[SYS_MSG_INDEX(HAL_SYS_PING_ACK)], + msecs_to_jiffies( + inst->core->resources.msm_vidc_hw_rsp_timeout)); + if (rc) { + if (try_wait_for_completion(&inst->completions[SESSION_MSG_INDEX(cmd)])) { + s_vpr_e(inst->sid, "Received %d response. 
Continue session\n", + SESSION_MSG_INDEX(cmd)); + return 0; + } + } + msm_comm_kill_session(inst); + rc = -EIO; + } else { + rc = 0; + } + return rc; +} + +static int wait_for_state(struct msm_vidc_inst *inst, + enum instance_state flipped_state, + enum instance_state desired_state, + enum hal_command_response hal_cmd) +{ + int rc = 0; + + if (!inst) { + d_vpr_e("Invalid parameter %s\n", __func__); + return -EINVAL; + } + if (IS_ALREADY_IN_STATE(flipped_state, desired_state)) { + s_vpr_h(inst->sid, "inst: %pK is already in state: %d\n", + inst, inst->state); + goto err_same_state; + } + s_vpr_h(inst->sid, "Waiting for hal_cmd: %d\n", hal_cmd); + rc = wait_for_sess_signal_receipt(inst, hal_cmd); + if (!rc) + change_inst_state(inst, desired_state); +err_same_state: + return rc; +} + +void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type) +{ + struct v4l2_event event = {.id = 0, .type = event_type}; + + v4l2_event_queue_fh(&inst->event_handler, &event); +} + +static void msm_comm_generate_max_clients_error(struct msm_vidc_inst *inst) +{ + enum hal_command_response cmd = HAL_SESSION_ERROR; + struct msm_vidc_cb_cmd_done response = {0}; + + if (!inst) { + d_vpr_e("%s: invalid input parameters\n", __func__); + return; + } + s_vpr_e(inst->sid, "%s: Too many clients\n", __func__); + response.inst_id = inst; + response.status = VIDC_ERR_MAX_CLIENTS; + handle_session_error(cmd, (void *)&response); +} + +static void print_cap(u32 sid, const char *type, + struct hal_capability_supported *cap) +{ + s_vpr_h(sid, "%-24s: %-10d %-10d %-10d %-10d\n", + type, cap->min, cap->max, cap->step_size, cap->default_value); +} + +static int msm_vidc_comm_update_ctrl(struct msm_vidc_inst *inst, + u32 id, struct hal_capability_supported *cap) +{ + struct v4l2_ctrl *ctrl = NULL; + int rc = 0; + bool is_menu = false; + + ctrl = v4l2_ctrl_find(&inst->ctrl_handler, id); + if (!ctrl) { + s_vpr_e(inst->sid, + "%s: Conrol id %d not found\n", __func__, id); + return -EINVAL; + } + + 
if (ctrl->type == V4L2_CTRL_TYPE_MENU) + is_menu = true; + + /** + * For menu controls the step value is interpreted + * as a menu_skip_mask. + */ + rc = v4l2_ctrl_modify_range(ctrl, cap->min, cap->max, + is_menu ? ctrl->menu_skip_mask : cap->step_size, + cap->default_value); + if (rc) { + s_vpr_e(inst->sid, + "%s: failed: control name %s, min %d, max %d, %s %x, default_value %d\n", + __func__, ctrl->name, cap->min, cap->max, + is_menu ? "menu_skip_mask" : "step", + is_menu ? ctrl->menu_skip_mask : cap->step_size, + cap->default_value); + goto error; + } + + s_vpr_h(inst->sid, + "Updated control: %s: min %lld, max %lld, %s %x, default value = %lld\n", + ctrl->name, ctrl->minimum, ctrl->maximum, + is_menu ? "menu_skip_mask" : "step", + is_menu ? ctrl->menu_skip_mask : ctrl->step, + ctrl->default_value); + +error: + return rc; +} + +static void msm_vidc_comm_update_ctrl_limits(struct msm_vidc_inst *inst) +{ + if (inst->session_type == MSM_VIDC_ENCODER) { + msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE, + &inst->capability.cap[CAP_BITRATE]); + msm_vidc_comm_update_ctrl(inst, + V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT, + &inst->capability.cap[CAP_LTR_COUNT]); + msm_vidc_comm_update_ctrl(inst, + V4L2_CID_MPEG_VIDEO_B_FRAMES, + &inst->capability.cap[CAP_BFRAME]); + } + msm_vidc_comm_update_ctrl(inst, + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + &inst->capability.cap[CAP_H264_LEVEL]); + msm_vidc_comm_update_ctrl(inst, + V4L2_CID_MPEG_VIDEO_HEVC_LEVEL, + &inst->capability.cap[CAP_HEVC_LEVEL]); + /* + * Default value of level is unknown, but since we are not + * using unknown value while updating level controls, we need + * to reinitialize inst->level to HFI unknown value. 
+ */ + inst->level = HFI_LEVEL_UNKNOWN; +} + +static void handle_session_init_done(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_inst *inst = NULL; + + if (!response) { + d_vpr_e("Failed to get valid response for session init\n"); + return; + } + + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + if (response->status) { + s_vpr_e(inst->sid, "Session init response from FW: %#x\n", + response->status); + goto error; + } + + s_vpr_l(inst->sid, "handled: SESSION_INIT_DONE\n"); + signal_session_msg_receipt(cmd, inst); + put_inst(inst); + return; + +error: + if (response->status == VIDC_ERR_MAX_CLIENTS) + msm_comm_generate_max_clients_error(inst); + else + msm_comm_generate_session_error(inst); + + signal_session_msg_receipt(cmd, inst); + put_inst(inst); +} + +static int msm_comm_update_capabilities(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core; + struct msm_vidc_capability *capability = NULL; + u32 i, codec; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + core = inst->core; + codec = get_v4l2_codec(inst); + + for (i = 0; i < core->resources.codecs_count; i++) { + if (core->capabilities[i].codec == + get_hal_codec(codec, inst->sid) && + core->capabilities[i].domain == + get_hal_domain(inst->session_type, inst->sid)) { + capability = &core->capabilities[i]; + break; + } + } + if (!capability) { + s_vpr_e(inst->sid, + "%s: capabilities not found for domain %#x codec %#x\n", + __func__, get_hal_domain(inst->session_type, inst->sid), + get_hal_codec(codec, inst->sid)); + return -EINVAL; + } + + s_vpr_h(inst->sid, "%s: capabilities for domain %#x codec %#x\n", + __func__, capability->domain, capability->codec); + memcpy(&inst->capability, capability, + sizeof(struct msm_vidc_capability)); + + s_vpr_h(inst->sid, + "Capability 
type : min max step_size default_value\n"); + print_cap(inst->sid, "width", &inst->capability.cap[CAP_FRAME_WIDTH]); + print_cap(inst->sid, "height", &inst->capability.cap[CAP_FRAME_HEIGHT]); + print_cap(inst->sid, "mbs_per_frame", + &inst->capability.cap[CAP_MBS_PER_FRAME]); + print_cap(inst->sid, "mbs_per_sec", + &inst->capability.cap[CAP_MBS_PER_SECOND]); + print_cap(inst->sid, "frame_rate", + &inst->capability.cap[CAP_FRAMERATE]); + print_cap(inst->sid, "bitrate", &inst->capability.cap[CAP_BITRATE]); + print_cap(inst->sid, "scale_x", &inst->capability.cap[CAP_SCALE_X]); + print_cap(inst->sid, "scale_y", &inst->capability.cap[CAP_SCALE_Y]); + print_cap(inst->sid, "hier_p", + &inst->capability.cap[CAP_HIER_P_NUM_ENH_LAYERS]); + print_cap(inst->sid, "ltr_count", &inst->capability.cap[CAP_LTR_COUNT]); + print_cap(inst->sid, "bframe", &inst->capability.cap[CAP_BFRAME]); + print_cap(inst->sid, "mbs_per_sec_low_power", + &inst->capability.cap[CAP_MBS_PER_SECOND_POWER_SAVE]); + print_cap(inst->sid, "i_qp", &inst->capability.cap[CAP_I_FRAME_QP]); + print_cap(inst->sid, "p_qp", &inst->capability.cap[CAP_P_FRAME_QP]); + print_cap(inst->sid, "b_qp", &inst->capability.cap[CAP_B_FRAME_QP]); + print_cap(inst->sid, "slice_bytes", + &inst->capability.cap[CAP_SLICE_BYTE]); + print_cap(inst->sid, "slice_mbs", &inst->capability.cap[CAP_SLICE_MB]); + print_cap(inst->sid, "max_videocores", + &inst->capability.cap[CAP_MAX_VIDEOCORES]); + /* Secure usecase specific */ + print_cap(inst->sid, "secure_width", + &inst->capability.cap[CAP_SECURE_FRAME_WIDTH]); + print_cap(inst->sid, "secure_height", + &inst->capability.cap[CAP_SECURE_FRAME_HEIGHT]); + print_cap(inst->sid, "secure_mbs_per_frame", + &inst->capability.cap[CAP_SECURE_MBS_PER_FRAME]); + print_cap(inst->sid, "secure_bitrate", + &inst->capability.cap[CAP_SECURE_BITRATE]); + /* Batch Mode Decode */ + print_cap(inst->sid, "batch_mbs_per_frame", + &inst->capability.cap[CAP_BATCH_MAX_MB_PER_FRAME]); + print_cap(inst->sid, 
"batch_frame_rate", + &inst->capability.cap[CAP_BATCH_MAX_FPS]); + /* Lossless encoding usecase specific */ + print_cap(inst->sid, "lossless_width", + &inst->capability.cap[CAP_LOSSLESS_FRAME_WIDTH]); + print_cap(inst->sid, "lossless_height", + &inst->capability.cap[CAP_LOSSLESS_FRAME_HEIGHT]); + print_cap(inst->sid, "lossless_mbs_per_frame", + &inst->capability.cap[CAP_LOSSLESS_MBS_PER_FRAME]); + /* All intra encoding usecase specific */ + print_cap(inst->sid, "all_intra_frame_rate", + &inst->capability.cap[CAP_ALLINTRA_MAX_FPS]); + + msm_vidc_comm_update_ctrl_limits(inst); + + return 0; +} + +static void msm_vidc_queue_rbr_event(struct msm_vidc_inst *inst, + int fd, u32 offset) +{ + struct v4l2_event buf_event = {0}; + u32 *ptr; + + buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE; + ptr = (u32 *)buf_event.u.data; + ptr[0] = fd; + ptr[1] = offset; + + v4l2_event_queue_fh(&inst->event_handler, &buf_event); +} + +static void handle_event_change_insufficient(struct msm_vidc_inst *inst, + struct msm_vidc_format *fmt, + struct msm_vidc_cb_event *event_notify, + u32 codec) +{ + int extra_buff_count = 0; + + s_vpr_h(inst->sid, + "seq: V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT\n"); + + /* decide batching as configuration changed */ + inst->batch.enable = is_batching_allowed(inst); + s_vpr_hp(inst->sid, "seq : batching %s\n", + inst->batch.enable ? 
"enabled" : "disabled"); + msm_dcvs_try_enable(inst); + extra_buff_count = msm_vidc_get_extra_buff_count(inst, + HAL_BUFFER_OUTPUT); + fmt->count_min = event_notify->fw_min_cnt; + + if (is_vpp_delay_allowed(inst)) { + fmt->count_min = + max(fmt->count_min, (u32)MAX_BSE_VPP_DELAY); + fmt->count_min = + max(fmt->count_min, + (u32)(msm_vidc_vpp_delay & 0x1F)); + } + + fmt->count_min_host = fmt->count_min + extra_buff_count; + s_vpr_h(inst->sid, + "seq: hal buffer[%d] count: min %d min_host %d\n", + HAL_BUFFER_OUTPUT, fmt->count_min, + fmt->count_min_host); +} + +static void handle_event_change(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_inst *inst = NULL; + struct msm_vidc_cb_event *event_notify = data; + int event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; + struct v4l2_event seq_changed_event = {0}; + int rc = 0; + struct hfi_device *hdev; + u32 *ptr = NULL; + struct msm_vidc_format *fmt; + u32 codec; + + if (!event_notify) { + d_vpr_e("Got an empty event from hfi\n"); + return; + } + + inst = get_inst(get_vidc_core(event_notify->device_id), + event_notify->inst_id); + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("Got a response for an inactive session\n"); + goto err_bad_event; + } + hdev = inst->core->device; + codec = get_v4l2_codec(inst); + + switch (event_notify->hal_event_type) { + case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES: + { + /* + * Check if there is some parameter has changed + * If there is no change then no need to notify client + * If there is a change, then raise an insufficient event + */ + bool event_fields_changed = false; + + s_vpr_h(inst->sid, "seq: V4L2_EVENT_SEQ_CHANGED_SUFFICIENT\n"); + s_vpr_h(inst->sid, + "seq: event_notify->height = %d event_notify->width = %d\n", + event_notify->height, event_notify->width); + if (codec == V4L2_PIX_FMT_HEVC || codec == V4L2_PIX_FMT_VP9) + event_fields_changed |= (inst->bit_depth != + event_notify->bit_depth); + /* Check for change from hdr->non-hdr and vice versa */ + 
if (codec == V4L2_PIX_FMT_HEVC && + ((event_notify->colour_space == MSM_VIDC_BT2020 && + inst->colour_space != MSM_VIDC_BT2020) || + (event_notify->colour_space != MSM_VIDC_BT2020 && + inst->colour_space == MSM_VIDC_BT2020))) + event_fields_changed = true; + + /* + * Check for a change from progressive to interlace + * and vice versa + */ + if ((event_notify->pic_struct == MSM_VIDC_PIC_STRUCT_MAYBE_INTERLACED && + inst->pic_struct == MSM_VIDC_PIC_STRUCT_PROGRESSIVE) || + (event_notify->pic_struct == MSM_VIDC_PIC_STRUCT_PROGRESSIVE && + inst->pic_struct == MSM_VIDC_PIC_STRUCT_MAYBE_INTERLACED)) + event_fields_changed = true; + + fmt = &inst->fmts[OUTPUT_PORT]; + event_fields_changed |= + (fmt->v4l2_fmt.fmt.pix_mp.height != + event_notify->height); + event_fields_changed |= + (fmt->v4l2_fmt.fmt.pix_mp.width != event_notify->width); + + if (event_fields_changed) { + event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; + } else { + inst->entropy_mode = event_notify->entropy_mode; + + /* configure work mode considering low latency*/ + if (is_low_latency_hint(inst)) { + rc = call_core_op(inst->core, decide_work_mode, + inst); + if (rc) + s_vpr_e(inst->sid, + "%s: Failed to decide work mode\n", + __func__); + } + + fmt->count_min = event_notify->fw_min_cnt; + msm_dcvs_reset(inst); + + s_vpr_h(inst->sid, + "seq: No parameter change continue session\n"); + rc = call_hfi_op(hdev, session_continue, + (void *)inst->session); + if (rc) { + s_vpr_e(inst->sid, + "failed to send session_continue\n"); + } + goto err_bad_event; + } + break; + } + case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES: + event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; + break; + case HAL_EVENT_RELEASE_BUFFER_REFERENCE: + { + struct msm_vidc_buffer *mbuf; + u32 planes[VIDEO_MAX_PLANES] = {0}; + + s_vpr_l(inst->sid, + "rbr: data_buffer: %x extradata_buffer: %x\n", + event_notify->packet_buffer, + event_notify->extra_data_buffer); + + planes[0] = event_notify->packet_buffer; + planes[1] = 
event_notify->extra_data_buffer;
+		mbuf = msm_comm_get_buffer_using_device_planes(inst,
+				OUTPUT_MPLANE, planes);
+		if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
+			s_vpr_e(inst->sid,
+				"%s: data_addr %x, extradata_addr %x not found\n",
+				__func__, planes[0], planes[1]);
+		} else {
+			handle_release_buffer_reference(inst, mbuf);
+			kref_put_mbuf(mbuf);
+		}
+		goto err_bad_event;
+	}
+	default:
+		break;
+	}
+
+	/* Bit depth and pic struct change events are combined into a single
+	 * event (insufficient event) for the userspace. Currently bit depth
+	 * change is only for HEVC and interlaced support is for all
+	 * codecs except HEVC.
+	 * event data is now as follows:
+	 * u32 *ptr = seq_changed_event.u.data;
+	 * ptr[MSM_VIDC_HEIGHT] = height
+	 * ptr[MSM_VIDC_WIDTH] = width
+	 * ptr[MSM_VIDC_BIT_DEPTH] = bit depth
+	 * ptr[MSM_VIDC_PIC_STRUCT] = pic struct (progressive or interlaced)
+	 * ptr[MSM_VIDC_COLOR_SPACE] = colour space
+	 * ptr[MSM_VIDC_FW_MIN_COUNT] = fw min count
+	 */
+
+	inst->profile = event_notify->profile;
+	inst->level = event_notify->level;
+	inst->entropy_mode = event_notify->entropy_mode;
+	/* HW returns progressive_only flag in pic_struct. */
+	inst->pic_struct =
+		event_notify->pic_struct ? 
+ MSM_VIDC_PIC_STRUCT_PROGRESSIVE : + MSM_VIDC_PIC_STRUCT_MAYBE_INTERLACED; + inst->colour_space = event_notify->colour_space; + + ptr = (u32 *)seq_changed_event.u.data; + ptr[MSM_VIDC_HEIGHT] = event_notify->height; + ptr[MSM_VIDC_WIDTH] = event_notify->width; + ptr[MSM_VIDC_BIT_DEPTH] = event_notify->bit_depth; + ptr[MSM_VIDC_PIC_STRUCT] = event_notify->pic_struct; + ptr[MSM_VIDC_COLOR_SPACE] = event_notify->colour_space; + ptr[MSM_VIDC_FW_MIN_COUNT] = event_notify->fw_min_cnt; + + s_vpr_h(inst->sid, "seq: height = %u width = %u\n", + event_notify->height, event_notify->width); + + s_vpr_h(inst->sid, + "seq: bit_depth = %u pic_struct = %u colour_space = %u\n", + event_notify->bit_depth, event_notify->pic_struct, + event_notify->colour_space); + + s_vpr_h(inst->sid, "seq: fw_min_count = %u\n", + event_notify->fw_min_cnt); + + mutex_lock(&inst->lock); + inst->in_reconfig = true; + fmt = &inst->fmts[INPUT_PORT]; + fmt->v4l2_fmt.fmt.pix_mp.height = event_notify->height; + fmt->v4l2_fmt.fmt.pix_mp.width = event_notify->width; + inst->bit_depth = event_notify->bit_depth; + + fmt = &inst->fmts[OUTPUT_PORT]; + fmt->v4l2_fmt.fmt.pix_mp.height = event_notify->height; + fmt->v4l2_fmt.fmt.pix_mp.width = event_notify->width; + mutex_unlock(&inst->lock); + + if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) + handle_event_change_insufficient(inst, fmt, + event_notify, codec); + + rc = msm_vidc_check_session_supported(inst); + if (!rc) { + seq_changed_event.type = event; + v4l2_event_queue_fh(&inst->event_handler, &seq_changed_event); + } else if (rc == -ENOTSUPP) { + msm_vidc_queue_v4l2_event(inst, + V4L2_EVENT_MSM_VIDC_HW_UNSUPPORTED); + } else if (rc == -ENOMEM) { + msm_vidc_queue_v4l2_event(inst, + V4L2_EVENT_MSM_VIDC_HW_OVERLOAD); + } + s_vpr_l(inst->sid, "handled: SESSION_EVENT_CHANGE\n"); + +err_bad_event: + put_inst(inst); +} + +static void handle_session_prop_info(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct 
getprop_buf *getprop; + struct msm_vidc_inst *inst; + + if (!response) { + d_vpr_e("Failed to get valid response for prop info\n"); + return; + } + + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + getprop = kzalloc(sizeof(*getprop), GFP_KERNEL); + if (!getprop) { + s_vpr_e(inst->sid, "%s: getprop kzalloc failed\n", __func__); + goto err_prop_info; + } + + getprop->data = kmemdup((void *) (&response->data.property), + sizeof(union hal_get_property), GFP_KERNEL); + if (!getprop->data) { + s_vpr_e(inst->sid, "%s: kmemdup failed\n", __func__); + kfree(getprop); + goto err_prop_info; + } + + mutex_lock(&inst->pending_getpropq.lock); + list_add_tail(&getprop->list, &inst->pending_getpropq.list); + mutex_unlock(&inst->pending_getpropq.lock); + s_vpr_l(inst->sid, "handled: SESSION_PROPERTY_INFO\n"); + signal_session_msg_receipt(cmd, inst); + +err_prop_info: + put_inst(inst); +} + +static void handle_load_resource_done(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_inst *inst; + + if (!response) { + d_vpr_e("Failed to get valid response for load resource\n"); + return; + } + + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + if (response->status) { + s_vpr_e(inst->sid, "Load resource response from FW : %#x\n", + response->status); + msm_comm_generate_session_error(inst); + } + + s_vpr_l(inst->sid, "handled: SESSION_LOAD_RESOURCE_DONE\n"); + put_inst(inst); +} + +static void handle_start_done(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_inst *inst; + + if (!response) { + d_vpr_e("Failed to get valid response for start\n"); + return; + } + + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if 
(!inst) {
+		d_vpr_e("Got a response for an inactive session\n");
+		return;
+	}
+	s_vpr_l(inst->sid, "handled: SESSION_START_DONE\n");
+
+	signal_session_msg_receipt(cmd, inst);
+	put_inst(inst);
+}
+
+static void handle_stop_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		d_vpr_e("Failed to get valid response for stop\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->inst_id);
+	if (!inst) {
+		d_vpr_e("Got a response for an inactive session\n");
+		return;
+	}
+
+	s_vpr_l(inst->sid, "handled: SESSION_STOP_DONE\n");
+	signal_session_msg_receipt(cmd, inst);
+	put_inst(inst);
+}
+
+static void handle_ping_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		/* was a copy-paste of handle_stop_done's message */
+		d_vpr_e("Failed to get valid response for ping\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->inst_id);
+	if (!inst) {
+		d_vpr_e("Got a response for an inactive session\n");
+		return;
+	}
+
+	s_vpr_l(inst->sid, "handled: SYS_PING_DONE\n");
+	complete(&inst->core->completions[SYS_MSG_INDEX(HAL_SYS_PING_ACK)]);
+	put_inst(inst);
+}
+
+static void handle_release_res_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		d_vpr_e("Failed to get valid response for release resource\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->inst_id);
+	if (!inst) {
+		d_vpr_e("Got a response for an inactive session\n");
+		return;
+	}
+
+	s_vpr_l(inst->sid, "handled: SESSION_RELEASE_RESOURCE_DONE\n");
+	signal_session_msg_receipt(cmd, inst);
+	put_inst(inst);
+}
+
+void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst)
+{
+	struct internal_buf *binfo;
+	u32 buffers_owned_by_driver = 0;
+	struct hal_buffer_requirements 
*dpb = NULL; + u32 i; + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + if (inst->buff_req.buffer[i].buffer_type == HAL_BUFFER_OUTPUT) { + dpb = &inst->buff_req.buffer[i]; + break; + } + } + if (!dpb) { + s_vpr_e(inst->sid, "Couldn't retrieve dpb buf req\n"); + return; + } + + mutex_lock(&inst->outputbufs.lock); + if (list_empty(&inst->outputbufs.list)) { + s_vpr_h(inst->sid, "%s: no OUTPUT buffers allocated\n", + __func__); + mutex_unlock(&inst->outputbufs.lock); + return; + } + list_for_each_entry(binfo, &inst->outputbufs.list, list) { + if (binfo->buffer_ownership != DRIVER) { + s_vpr_h(inst->sid, "This buffer is with FW %x\n", + binfo->smem.device_addr); + continue; + } + buffers_owned_by_driver++; + } + mutex_unlock(&inst->outputbufs.lock); + + if (buffers_owned_by_driver != dpb->buffer_count_actual) { + s_vpr_e(inst->sid, "DPB buffer count mismatch %d of %d\n", + buffers_owned_by_driver, + dpb->buffer_count_actual); + msm_vidc_handle_hw_error(inst->core); + } +} + +int msm_comm_queue_dpb_only_buffers(struct msm_vidc_inst *inst) +{ + struct internal_buf *binfo, *extra_info; + struct hfi_device *hdev; + struct vidc_frame_data frame_data = {0}; + int rc = 0; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + hdev = inst->core->device; + + extra_info = inst->dpb_extra_binfo; + mutex_lock(&inst->outputbufs.lock); + list_for_each_entry(binfo, &inst->outputbufs.list, list) { + if (binfo->buffer_ownership != DRIVER) + continue; + if (binfo->mark_remove) + continue; + frame_data.alloc_len = binfo->smem.size; + frame_data.filled_len = 0; + frame_data.offset = 0; + frame_data.device_addr = binfo->smem.device_addr; + frame_data.flags = 0; + frame_data.extradata_addr = + extra_info ? extra_info->smem.device_addr : 0; + frame_data.buffer_type = HAL_BUFFER_OUTPUT; + frame_data.extradata_size = + extra_info ? 
extra_info->smem.size : 0;
+		rc = call_hfi_op(hdev, session_ftb,
+			(void *) inst->session, &frame_data);
+		if (rc) {
+			/* Do not mark the buffer as owned by FW when the
+			 * queue call failed, and stop queuing so the error
+			 * is not overwritten by a later success.
+			 */
+			s_vpr_e(inst->sid, "%s: session_ftb failed\n",
+				__func__);
+			break;
+		}
+		binfo->buffer_ownership = FIRMWARE;
+	}
+	mutex_unlock(&inst->outputbufs.lock);
+
+	return rc;
+}
+
+static void handle_session_flush(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct v4l2_event flush_event = {0};
+	u32 *ptr = NULL;
+	enum hal_flush flush_type;
+	int rc;
+
+	if (!response) {
+		d_vpr_e("Failed to get valid response for flush\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->inst_id);
+	if (!inst) {
+		d_vpr_e("Got a response for an inactive session\n");
+		return;
+	}
+
+	if (response->data.flush_type & HAL_FLUSH_INPUT)
+		mutex_lock(&inst->bufq[INPUT_PORT].lock);
+	if (response->data.flush_type & HAL_FLUSH_OUTPUT)
+		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+
+		if (!(get_v4l2_codec(inst) == V4L2_PIX_FMT_VP9 &&
+			inst->in_reconfig))
+			msm_comm_validate_output_buffers(inst);
+
+		if (!inst->in_reconfig) {
+			rc = msm_comm_queue_dpb_only_buffers(inst);
+			if (rc) {
+				s_vpr_e(inst->sid,
+					"Failed to queue output buffers\n");
+			}
+		}
+	}
+	flush_event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE;
+	ptr = (u32 *)flush_event.u.data;
+
+	flush_type = response->data.flush_type;
+	switch (flush_type) {
+	case HAL_FLUSH_INPUT:
+		inst->in_flush = false;
+		ptr[0] = V4L2_CMD_FLUSH_OUTPUT;
+		break;
+	case HAL_FLUSH_OUTPUT:
+		inst->out_flush = false;
+		ptr[0] = V4L2_CMD_FLUSH_CAPTURE;
+		break;
+	case HAL_FLUSH_ALL:
+		inst->in_flush = false;
+		inst->out_flush = false;
+		ptr[0] |= V4L2_CMD_FLUSH_CAPTURE;
+		ptr[0] |= V4L2_CMD_FLUSH_OUTPUT;
+		break;
+	default:
+		s_vpr_e(inst->sid, "Invalid flush type received!\n");
+		goto exit;
+	}
+
+	if (flush_type == HAL_FLUSH_ALL) {
+		msm_comm_clear_window_data(inst);
+		inst->clk_data.buffer_counter = 0;
+	}
+
+	s_vpr_h(inst->sid,
+		"Notify 
flush complete, flush_type: %x\n", flush_type); + v4l2_event_queue_fh(&inst->event_handler, &flush_event); + +exit: + if (response->data.flush_type & HAL_FLUSH_OUTPUT) + mutex_unlock(&inst->bufq[OUTPUT_PORT].lock); + if (response->data.flush_type & HAL_FLUSH_INPUT) + mutex_unlock(&inst->bufq[INPUT_PORT].lock); + s_vpr_l(inst->sid, "handled: SESSION_FLUSH_DONE\n"); + put_inst(inst); +} + +static void handle_session_error(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct hfi_device *hdev = NULL; + struct msm_vidc_inst *inst = NULL; + int event = V4L2_EVENT_MSM_VIDC_SYS_ERROR; + + if (!response) { + d_vpr_e("Failed to get valid response for session error\n"); + return; + } + + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + hdev = inst->core->device; + s_vpr_e(inst->sid, "Session error received for inst %pK\n", inst); + + if (response->status == VIDC_ERR_MAX_CLIENTS) { + s_vpr_e(inst->sid, "Too many clients, rejecting %pK", inst); + event = V4L2_EVENT_MSM_VIDC_MAX_CLIENTS; + + /* + * Clean the HFI session now. Since inst->state is moved to + * INVALID, forward thread doesn't know FW has valid session + * or not. This is the last place driver knows that there is + * no session in FW. Hence clean HFI session now. 
+ */ + + msm_comm_session_clean(inst); + } else if (response->status == VIDC_ERR_NOT_SUPPORTED) { + s_vpr_e(inst->sid, "Unsupported bitstream in %pK", inst); + event = V4L2_EVENT_MSM_VIDC_HW_UNSUPPORTED; + } else { + s_vpr_e(inst->sid, "Unknown session error (%d) for %pK\n", + response->status, inst); + event = V4L2_EVENT_MSM_VIDC_SYS_ERROR; + } + + /* change state before sending error to client */ + change_inst_state(inst, MSM_VIDC_CORE_INVALID); + msm_vidc_queue_v4l2_event(inst, event); + s_vpr_l(inst->sid, "handled: SESSION_ERROR\n"); + put_inst(inst); +} + +static void msm_comm_clean_notify_client(struct msm_vidc_core *core) +{ + struct msm_vidc_inst *inst = NULL; + + if (!core) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + d_vpr_e("%s: Core %pK\n", __func__, core); + mutex_lock(&core->lock); + + list_for_each_entry(inst, &core->instances, list) { + mutex_lock(&inst->lock); + inst->state = MSM_VIDC_CORE_INVALID; + mutex_unlock(&inst->lock); + s_vpr_e(inst->sid, + "%s: Send sys error for inst %pK\n", __func__, inst); + msm_vidc_queue_v4l2_event(inst, + V4L2_EVENT_MSM_VIDC_SYS_ERROR); + } + mutex_unlock(&core->lock); +} + +static void handle_sys_error(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_core *core = NULL; + struct hfi_device *hdev = NULL; + struct msm_vidc_inst *inst = NULL; + int rc = 0; + bool panic = false; + + subsystem_crashed("venus"); + if (!response) { + d_vpr_e("Failed to get valid response for sys error\n"); + return; + } + + core = get_vidc_core(response->device_id); + if (!core) { + d_vpr_e("Got SYS_ERR but unable to identify core\n"); + return; + } + hdev = core->device; + + mutex_lock(&core->lock); + if (core->state == VIDC_CORE_UNINIT) { + d_vpr_e("%s: Core %pK already moved to state %d\n", + __func__, core, core->state); + mutex_unlock(&core->lock); + return; + } + + d_vpr_e("SYS_ERROR received for core %pK\n", core); + msm_vidc_noc_error_info(core); + 
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data); + list_for_each_entry(inst, &core->instances, list) { + s_vpr_e(inst->sid, + "%s: Send sys error for inst %pK\n", __func__, inst); + change_inst_state(inst, MSM_VIDC_CORE_INVALID); + msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_SYS_ERROR); + if (!core->trigger_ssr) + msm_comm_print_inst_info(inst); + } + + /* handle the hw error before core released to get full debug info */ + if (response->status == VIDC_ERR_NOC_ERROR) + panic = !!(msm_vidc_err_recovery_disable & VIDC_DISABLE_NOC_ERR_RECOV); + else + panic = !!(msm_vidc_err_recovery_disable & VIDC_DISABLE_NON_NOC_ERR_RECOV); + if (panic) { + d_vpr_e("Got unrecoverable video fw error"); + MSM_VIDC_ERROR(true); + } + + d_vpr_e("Calling core_release\n"); + rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data); + if (rc) { + d_vpr_e("core_release failed\n"); + mutex_unlock(&core->lock); + return; + } + core->state = VIDC_CORE_UNINIT; + mutex_unlock(&core->lock); + + d_vpr_l("handled: SYS_ERROR\n"); +} + +void msm_comm_session_clean(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev = NULL; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return; + } + if (!inst->session) { + s_vpr_h(inst->sid, "%s: inst %pK session already cleaned\n", + __func__, inst); + return; + } + + hdev = inst->core->device; + mutex_lock(&inst->lock); + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + rc = call_hfi_op(hdev, session_clean, + (void *)inst->session); + if (rc) { + s_vpr_e(inst->sid, "Session clean failed :%pK\n", inst); + } + inst->session = NULL; + mutex_unlock(&inst->lock); +} + +static void handle_session_close(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_cmd_done *response = data; + struct msm_vidc_inst *inst; + + if (!response) { + d_vpr_e("Failed to get valid response for session close\n"); + return; + } + + inst = 
get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + s_vpr_l(inst->sid, "handled: SESSION_END_DONE\n"); + signal_session_msg_receipt(cmd, inst); + show_stats(inst); + put_inst(inst); +} + +struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( + struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf) +{ + u32 port = 0; + struct vb2_buffer *vb = NULL; + struct vb2_queue *q = NULL; + bool found = false; + + if (mbuf->vvb.vb2_buf.type == OUTPUT_MPLANE) { + port = OUTPUT_PORT; + } else if (mbuf->vvb.vb2_buf.type == INPUT_MPLANE) { + port = INPUT_PORT; + } else { + s_vpr_e(inst->sid, "%s: invalid type %d\n", + __func__, mbuf->vvb.vb2_buf.type); + return NULL; + } + + WARN_ON(!mutex_is_locked(&inst->bufq[port].lock)); + found = false; + q = &inst->bufq[port].vb2_bufq; + if (!q->streaming) { + s_vpr_e(inst->sid, "port %d is not streaming", port); + goto unlock; + } + list_for_each_entry(vb, &q->queued_list, queued_entry) { + if (vb->state != VB2_BUF_STATE_ACTIVE) + continue; + if (msm_comm_compare_vb2_planes(inst, mbuf, vb)) { + found = true; + break; + } + } +unlock: + if (!found) { + print_vidc_buffer(VIDC_ERR, "vb2 not found for", inst, mbuf); + return NULL; + } + + return vb; +} + +int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + struct vb2_buffer *vb2; + struct vb2_v4l2_buffer *vbuf; + u32 i, port; + int rc = 0; + + if (!inst || !mbuf) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + + if (mbuf->vvb.vb2_buf.type == OUTPUT_MPLANE) + port = OUTPUT_PORT; + else if (mbuf->vvb.vb2_buf.type == INPUT_MPLANE) + port = INPUT_PORT; + else + return -EINVAL; + + /* + * access vb2 buffer under q->lock and if streaming only to + * ensure the buffer was not free'd by vb2 framework while + * we are accessing it here. 
+ */ + mutex_lock(&inst->bufq[port].lock); + vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf); + if (!vb2) { + s_vpr_e(inst->sid, "%s: port %d buffer not found\n", + __func__, port); + rc = -EINVAL; + goto unlock; + } + if (inst->bufq[port].vb2_bufq.streaming) { + vbuf = to_vb2_v4l2_buffer(vb2); + vbuf->flags = mbuf->vvb.flags; + vb2->timestamp = mbuf->vvb.vb2_buf.timestamp; + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + vb2->planes[i].bytesused = + mbuf->vvb.vb2_buf.planes[i].bytesused; + vb2->planes[i].data_offset = + mbuf->vvb.vb2_buf.planes[i].data_offset; + } + vb2_buffer_done(vb2, VB2_BUF_STATE_DONE); + } else { + s_vpr_e(inst->sid, "%s: port %d is not streaming\n", + __func__, port); + } +unlock: + mutex_unlock(&inst->bufq[port].lock); + + return rc; +} + +static bool is_eos_buffer(struct msm_vidc_inst *inst, u32 device_addr) +{ + struct eos_buf *temp, *next; + bool found = false; + + mutex_lock(&inst->eosbufs.lock); + list_for_each_entry_safe(temp, next, &inst->eosbufs.list, list) { + if (temp->smem.device_addr == device_addr) { + found = true; + temp->is_queued = 0; + list_del(&temp->list); + msm_comm_smem_free(inst, &temp->smem); + kfree(temp); + break; + } + } + mutex_unlock(&inst->eosbufs.lock); + + return found; +} + +static void handle_ebd(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_data_done *response = data; + struct msm_vidc_buffer *mbuf; + struct vb2_buffer *vb; + struct msm_vidc_inst *inst; + struct vidc_hal_ebd *empty_buf_done; + u32 planes[VIDEO_MAX_PLANES] = {0}; + struct v4l2_format *f; + struct v4l2_ctrl *ctrl; + + if (!response) { + d_vpr_e("Invalid response from vidc_hal\n"); + return; + } + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + empty_buf_done = (struct vidc_hal_ebd *)&response->input_done; + /* If this is internal EOS buffer, handle it in driver */ + if (is_eos_buffer(inst, 
empty_buf_done->packet_buffer)) { + s_vpr_h(inst->sid, "Received EOS buffer 0x%x\n", + empty_buf_done->packet_buffer); + goto exit; + } + + planes[0] = empty_buf_done->packet_buffer; + planes[1] = empty_buf_done->extra_data_buffer; + + mbuf = msm_comm_get_buffer_using_device_planes(inst, + INPUT_MPLANE, planes); + if (!mbuf || !kref_get_mbuf(inst, mbuf)) { + s_vpr_e(inst->sid, + "%s: data_addr %x, extradata_addr %x not found\n", + __func__, planes[0], planes[1]); + goto exit; + } + vb = &mbuf->vvb.vb2_buf; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_SUPERFRAME); + if (ctrl->val && empty_buf_done->offset + + empty_buf_done->filled_len < vb->planes[0].length) { + s_vpr_h(inst->sid, + "%s: addr (%#x): offset (%d) + filled_len (%d) < length (%d)\n", + __func__, empty_buf_done->packet_buffer, + empty_buf_done->offset, + empty_buf_done->filled_len, + vb->planes[0].length); + kref_put_mbuf(mbuf); + goto exit; + } + + mbuf->flags &= ~MSM_VIDC_FLAG_QUEUED; + vb->planes[0].bytesused = response->input_done.filled_len; + if (vb->planes[0].bytesused > vb->planes[0].length) + s_vpr_l(inst->sid, "bytesused overflow length\n"); + + vb->planes[0].data_offset = response->input_done.offset; + if (vb->planes[0].data_offset > vb->planes[0].length) + s_vpr_l(inst->sid, "data_offset overflow length\n"); + + if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) { + s_vpr_l(inst->sid, "Failed : Unsupported input stream\n"); + mbuf->vvb.flags |= V4L2_BUF_INPUT_UNSUPPORTED; + } + if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) { + s_vpr_l(inst->sid, "Failed : Corrupted input stream\n"); + mbuf->vvb.flags |= V4L2_BUF_FLAG_DATA_CORRUPT; + } + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + if (f->fmt.pix_mp.num_planes > 1) + vb->planes[1].bytesused = vb->planes[1].length; + + update_recon_stats(inst, &empty_buf_done->recon_stats); + inst->clk_data.buffer_counter++; + /* + * dma cache operations need to be performed before dma_unmap + * which is done inside msm_comm_put_vidc_buffer() + */ + 
msm_comm_dqbuf_cache_operations(inst, mbuf); + /* + * put_buffer should be done before vb2_buffer_done else + * client might queue the same buffer before it is unmapped + * in put_buffer. + */ + msm_comm_put_vidc_buffer(inst, mbuf); + msm_comm_vb2_buffer_done(inst, mbuf); + msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD); + kref_put_mbuf(mbuf); +exit: + s_vpr_l(inst->sid, "handled: SESSION_ETB_DONE\n"); + put_inst(inst); +} + +static int handle_multi_stream_buffers(struct msm_vidc_inst *inst, + u32 dev_addr) +{ + struct internal_buf *binfo; + struct msm_smem *smem; + bool found = false; + + mutex_lock(&inst->outputbufs.lock); + list_for_each_entry(binfo, &inst->outputbufs.list, list) { + smem = &binfo->smem; + if (smem && dev_addr == smem->device_addr) { + if (binfo->buffer_ownership == DRIVER) { + s_vpr_e(inst->sid, + "FW returned same buffer: %x\n", + dev_addr); + break; + } + binfo->buffer_ownership = DRIVER; + found = true; + break; + } + } + mutex_unlock(&inst->outputbufs.lock); + + if (!found) { + s_vpr_e(inst->sid, + "Failed to find output buffer in queued list: %x\n", + dev_addr); + } + + return 0; +} + +enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst) +{ + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_SECONDARY) + return HAL_BUFFER_OUTPUT2; + else + return HAL_BUFFER_OUTPUT; +} + +static void handle_fbd(enum hal_command_response cmd, void *data) +{ + struct msm_vidc_cb_data_done *response = data; + struct msm_vidc_buffer *mbuf; + struct msm_vidc_inst *inst; + struct vb2_buffer *vb; + struct vidc_hal_fbd *fill_buf_done; + enum hal_buffer buffer_type; + u64 time_usec = 0; + u32 planes[VIDEO_MAX_PLANES] = {0}; + struct v4l2_format *f; + int rc = 0; + + if (!response) { + d_vpr_e("Invalid response from vidc_hal\n"); + return; + } + + inst = get_inst(get_vidc_core(response->device_id), + response->inst_id); + if (!inst) { + d_vpr_e("Got a response for an inactive session\n"); + return; + } + + fill_buf_done 
= (struct vidc_hal_fbd *)&response->output_done; + planes[0] = fill_buf_done->packet_buffer1; + planes[1] = fill_buf_done->extra_data_buffer; + + buffer_type = msm_comm_get_hal_output_buffer(inst); + if (fill_buf_done->buffer_type == buffer_type) { + mbuf = msm_comm_get_buffer_using_device_planes(inst, + OUTPUT_MPLANE, planes); + if (!mbuf || !kref_get_mbuf(inst, mbuf)) { + s_vpr_e(inst->sid, + "%s: data_addr %x, extradata_addr %x not found\n", + __func__, planes[0], planes[1]); + goto exit; + } + } else { + if (handle_multi_stream_buffers(inst, + fill_buf_done->packet_buffer1)) + s_vpr_e(inst->sid, + "Failed : Output buffer not found %pa\n", + &fill_buf_done->packet_buffer1); + goto exit; + } + mbuf->flags &= ~MSM_VIDC_FLAG_QUEUED; + vb = &mbuf->vvb.vb2_buf; + + if (fill_buf_done->buffer_type == HAL_BUFFER_OUTPUT2 && + fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY) { + s_vpr_e(inst->sid, "%s: Read only buffer not allowed for OPB\n", + __func__); + goto exit; + } + + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME) + fill_buf_done->filled_len1 = 0; + vb->planes[0].bytesused = fill_buf_done->filled_len1; + if (vb->planes[0].bytesused > vb->planes[0].length) + s_vpr_l(inst->sid, "fbd:Overflow bytesused = %d; length = %d\n", + vb->planes[0].bytesused, + vb->planes[0].length); + vb->planes[0].data_offset = fill_buf_done->offset1; + if (vb->planes[0].data_offset > vb->planes[0].length) + s_vpr_l(inst->sid, + "fbd:Overflow data_offset = %d; length = %d\n", + vb->planes[0].data_offset, vb->planes[0].length); + + time_usec = fill_buf_done->timestamp_hi; + time_usec = (time_usec << 32) | fill_buf_done->timestamp_lo; + + vb->timestamp = (time_usec * NSEC_PER_USEC); + + rc = msm_comm_store_input_tag(&inst->fbd_data, vb->index, + fill_buf_done->input_tag, + fill_buf_done->input_tag2, inst->sid); + if (rc) + s_vpr_e(inst->sid, "Failed to store input tag"); + + if (inst->session_type == MSM_VIDC_ENCODER) { + if (inst->max_filled_len < fill_buf_done->filled_len1) + 
inst->max_filled_len = fill_buf_done->filled_len1; + } + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + if (f->fmt.pix_mp.num_planes > 1) + vb->planes[1].bytesused = vb->planes[1].length; + + mbuf->vvb.flags = 0; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY) + mbuf->vvb.flags |= V4L2_BUF_FLAG_READONLY; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS) + mbuf->vvb.flags |= V4L2_BUF_FLAG_EOS; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG) + mbuf->vvb.flags |= V4L2_BUF_FLAG_CODECCONFIG; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME) + mbuf->vvb.flags |= V4L2_BUF_FLAG_KEYFRAME; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT) + mbuf->vvb.flags |= V4L2_BUF_FLAG_DATA_CORRUPT; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_ENDOFSUBFRAME) + mbuf->vvb.flags |= V4L2_BUF_FLAG_END_OF_SUBFRAME; + switch (fill_buf_done->picture_type) { + case HFI_PICTURE_TYPE_P: + mbuf->vvb.flags |= V4L2_BUF_FLAG_PFRAME; + break; + case HFI_PICTURE_TYPE_B: + mbuf->vvb.flags |= V4L2_BUF_FLAG_BFRAME; + inst->has_bframe = 1; + break; + case HFI_FRAME_NOTCODED: + case HFI_UNUSED_PICT: + /* Do we need to care about these? */ + case HFI_FRAME_YUV: + break; + default: + break; + } + + if (inst->core->resources.ubwc_stats_in_fbd == 1) { + u32 frame_size = + (msm_vidc_get_mbs_per_frame(inst) / (32 * 8) * 3) / 2; + + mutex_lock(&inst->ubwc_stats_lock); + inst->ubwc_stats.is_valid = + fill_buf_done->ubwc_cr_stat.is_valid; + inst->ubwc_stats.worst_cr = + fill_buf_done->ubwc_cr_stat.worst_cr; + inst->ubwc_stats.worst_cf = + fill_buf_done->ubwc_cr_stat.worst_cf; + if (frame_size) + inst->ubwc_stats.worst_cf /= frame_size; + mutex_unlock(&inst->ubwc_stats_lock); + } + + /* + * dma cache operations need to be performed before dma_unmap + * which is done inside msm_comm_put_vidc_buffer() + */ + msm_comm_dqbuf_cache_operations(inst, mbuf); + /* + * put_buffer should be done before vb2_buffer_done else + * client might queue the same buffer before it is unmapped + * in put_buffer. 
+ */ + msm_comm_put_vidc_buffer(inst, mbuf); + msm_comm_vb2_buffer_done(inst, mbuf); + msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD); + kref_put_mbuf(mbuf); + +exit: + s_vpr_l(inst->sid, "handled: SESSION_FTB_DONE\n"); + put_inst(inst); +} + +void handle_cmd_response(enum hal_command_response cmd, void *data) +{ + switch (cmd) { + case HAL_SYS_INIT_DONE: + handle_sys_init_done(cmd, data); + break; + case HAL_SYS_RELEASE_RESOURCE_DONE: + handle_sys_release_res_done(cmd, data); + break; + case HAL_SESSION_INIT_DONE: + handle_session_init_done(cmd, data); + break; + case HAL_SESSION_PROPERTY_INFO: + handle_session_prop_info(cmd, data); + break; + case HAL_SESSION_LOAD_RESOURCE_DONE: + handle_load_resource_done(cmd, data); + break; + case HAL_SESSION_START_DONE: + handle_start_done(cmd, data); + break; + case HAL_SESSION_ETB_DONE: + handle_ebd(cmd, data); + break; + case HAL_SESSION_FTB_DONE: + handle_fbd(cmd, data); + break; + case HAL_SESSION_STOP_DONE: + handle_stop_done(cmd, data); + break; + case HAL_SESSION_RELEASE_RESOURCE_DONE: + handle_release_res_done(cmd, data); + break; + case HAL_SESSION_END_DONE: + case HAL_SESSION_ABORT_DONE: + handle_session_close(cmd, data); + break; + case HAL_SYS_PING_ACK: + handle_ping_done(cmd, data); + break; + case HAL_SESSION_EVENT_CHANGE: + handle_event_change(cmd, data); + break; + case HAL_SESSION_FLUSH_DONE: + handle_session_flush(cmd, data); + break; + case HAL_SYS_WATCHDOG_TIMEOUT: + case HAL_SYS_ERROR: + handle_sys_error(cmd, data); + break; + case HAL_SESSION_ERROR: + handle_session_error(cmd, data); + break; + case HAL_SESSION_RELEASE_BUFFER_DONE: + handle_session_release_buf_done(cmd, data); + break; + default: + d_vpr_l("response unhandled: %d\n", cmd); + break; + } +} + +static inline enum msm_vidc_thermal_level msm_comm_vidc_thermal_level(int level) +{ + switch (level) { + case 0: + return VIDC_THERMAL_NORMAL; + case 1: + return VIDC_THERMAL_LOW; + case 2: + return VIDC_THERMAL_HIGH; + default: + return 
VIDC_THERMAL_CRITICAL; /* tail of msm_comm_vidc_thermal_level(): any level above 2 maps to critical */
	}
}

/*
 * is_core_turbo() - return true when @freq is at or above the highest
 * clock rate listed in the core's allowed-clock table.
 */
static bool is_core_turbo(struct msm_vidc_core *core, unsigned long freq)
{
	unsigned int i = 0;
	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
	u32 max_freq = 0;

	/* Scan the whole table; entries are not assumed to be sorted. */
	allowed_clks_tbl = core->resources.allowed_clks_tbl;
	for (i = 0; i < core->resources.allowed_clks_tbl_size; i++) {
		if (max_freq < allowed_clks_tbl[i].clock_rate)
			max_freq = allowed_clks_tbl[i].clock_rate;
	}
	return freq >= max_freq;
}

/*
 * is_thermal_permissible() - decide whether video sessions may keep
 * running at the current thermal level.
 *
 * Returns true when thermal mitigation does not apply (core not
 * mitigable, or mitigation disabled through debugfs), or when the core
 * is not at turbo frequency, or the thermal level is below
 * VIDC_THERMAL_LOW.  Only the combination "turbo frequency AND thermal
 * level >= VIDC_THERMAL_LOW" is rejected.
 */
static bool is_thermal_permissible(struct msm_vidc_core *core)
{
	enum msm_vidc_thermal_level tl;
	unsigned long freq = 0;
	bool is_turbo = false;

	if (!core->resources.thermal_mitigable)
		return true;

	if (msm_vidc_thermal_mitigation_disabled) {
		d_vpr_h("Thermal mitigation not enabled. debugfs %d\n",
			msm_vidc_thermal_mitigation_disabled);
		return true;
	}

	tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
	freq = core->curr_freq;

	is_turbo = is_core_turbo(core, freq);
	d_vpr_h("Core freq %ld Thermal level %d Turbo mode %d\n",
		freq, tl, is_turbo);

	if (is_turbo && tl >= VIDC_THERMAL_LOW) {
		d_vpr_e(
			"Video session not allowed. Turbo mode %d Thermal level %d\n",
			is_turbo, tl);
		return false;
	}
	return true;
}

/*
 * is_batching_allowed() - evaluate whether decode batching may stay
 * enabled for @inst (the enable expression continues in the next chunk).
 *
 * Returns false for a NULL inst or core.  The caps read here bound the
 * batch-eligible frame rate and macroblocks per frame.
 */
bool is_batching_allowed(struct msm_vidc_inst *inst)
{
	u32 op_pixelformat, fps, maxmbs, maxfps;
	u32 ignore_flags = VIDC_THUMBNAIL;
	u32 enable = 0;

	if (!inst || !inst->core)
		return false;

	/* Enable decode batching based on below conditions */
	op_pixelformat =
		inst->fmts[OUTPUT_PORT].v4l2_fmt.fmt.pix_mp.pixelformat;
	/* drop the low 16 bits: the integer part is compared to the caps */
	fps = inst->clk_data.frame_rate >> 16;
	maxmbs = inst->capability.cap[CAP_BATCH_MAX_MB_PER_FRAME].max;
	maxfps = inst->capability.cap[CAP_BATCH_MAX_FPS].max;

	/*
	 * if batching enabled previously then you may choose
	 * to disable it based on recent configuration changes.
+ * if batching already disabled do not enable it again + * as sufficient extra buffers (required for batch mode + * on both ports) may not have been updated to client. + */ + enable = (inst->batch.enable && + inst->core->resources.decode_batching && + !is_low_latency_hint(inst) && + is_single_session(inst, ignore_flags) && + is_decode_session(inst) && + !is_thumbnail_session(inst) && + is_realtime_session(inst) && + !is_heif_decoder(inst) && + !inst->clk_data.low_latency_mode && + (op_pixelformat == V4L2_PIX_FMT_NV12_UBWC || + op_pixelformat == V4L2_PIX_FMT_NV12_TP10_UBWC) && + fps <= maxfps && + msm_vidc_get_mbs_per_frame(inst) <= maxmbs); + + s_vpr_hp(inst->sid, "%s: batching %s\n", + __func__, enable ? "enabled" : "disabled"); + + return enable; +} + +static int msm_comm_session_abort(struct msm_vidc_inst *inst) +{ + int rc = 0, abort_completion = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + hdev = inst->core->device; + abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE); + + s_vpr_e(inst->sid, "%s: inst %pK\n", __func__, inst); + rc = call_hfi_op(hdev, session_abort, (void *)inst->session); + if (rc) { + s_vpr_e(inst->sid, + "%s: session_abort failed rc: %d\n", __func__, rc); + goto exit; + } + rc = wait_for_completion_timeout( + &inst->completions[abort_completion], + msecs_to_jiffies( + inst->core->resources.msm_vidc_hw_rsp_timeout)); + if (!rc) { + s_vpr_e(inst->sid, "%s: session abort timed out\n", __func__); + msm_comm_generate_sys_error(inst); + rc = -EBUSY; + } else { + rc = 0; + } +exit: + return rc; +} + +static void handle_thermal_event(struct msm_vidc_core *core) +{ + int rc = 0; + struct msm_vidc_inst *inst; + + if (!core || !core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, core); + return; + } + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (!inst->session) + continue; 

		mutex_unlock(&core->lock);
		/*
		 * NOTE(review): core->lock is dropped here, in the middle of
		 * list_for_each_entry() over core->instances, and re-taken at
		 * the bottom of the loop body.  Confirm entries cannot be
		 * removed from the list while the lock is released.
		 */
		if (inst->state >= MSM_VIDC_OPEN_DONE &&
			inst->state < MSM_VIDC_CLOSE_DONE) {
			/* fully opened but not yet closed: abort the session */
			s_vpr_e(inst->sid, "%s: abort inst %pK\n",
				__func__, inst);
			rc = msm_comm_session_abort(inst);
			if (rc) {
				s_vpr_e(inst->sid,
					"%s: session_abort failed rc: %d\n",
					__func__, rc);
				goto err_sess_abort;
			}
			change_inst_state(inst, MSM_VIDC_CORE_INVALID);
			s_vpr_e(inst->sid, "%s: Send sys error for inst %pK\n",
				__func__, inst);
			msm_vidc_queue_v4l2_event(inst,
				V4L2_EVENT_MSM_VIDC_SYS_ERROR);
		} else {
			/* not in an abortable state: raise a session error */
			msm_comm_generate_session_error(inst);
		}
		mutex_lock(&core->lock);
	}
	mutex_unlock(&core->lock);
	return;

err_sess_abort:
	/* an abort failed: clean up and notify every client on this core */
	msm_comm_clean_notify_client(core);
}

/*
 * msm_comm_handle_thermal_event() - for every registered core whose
 * current thermal state is not permissible, stop its active sessions via
 * handle_thermal_event().
 */
void msm_comm_handle_thermal_event(void)
{
	struct msm_vidc_core *core;

	/*
	 * NOTE(review): vidc_driver->cores is walked without an explicit
	 * lock -- presumably the core list is fixed after probe; confirm.
	 */
	list_for_each_entry(core, &vidc_driver->cores, list) {
		if (!is_thermal_permissible(core)) {
			d_vpr_e(
				"Thermal level critical, stop active sessions\n");
			handle_thermal_event(core);
		}
	}
}

/*
 * msm_comm_check_core_init() - wait for the firmware's SYS_INIT_DONE
 * response and move the core to VIDC_CORE_INIT_DONE.
 *
 * Returns 0 when the core is already initialized or the completion fires
 * within msm_vidc_hw_rsp_timeout ms; -EIO on timeout.  Note the wait is
 * performed with core->lock held.
 */
int msm_comm_check_core_init(struct msm_vidc_core *core, u32 sid)
{
	int rc = 0;

	mutex_lock(&core->lock);
	if (core->state >= VIDC_CORE_INIT_DONE) {
		s_vpr_h(sid, "Video core: %d is already in state: %d\n",
			core->id, core->state);
		goto exit;
	}
	s_vpr_h(sid, "Waiting for SYS_INIT_DONE\n");
	/* wait_for_completion_timeout(): 0 == timed out, >0 == jiffies left */
	rc = wait_for_completion_timeout(
		&core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
		msecs_to_jiffies(core->resources.msm_vidc_hw_rsp_timeout));
	if (!rc) {
		s_vpr_e(sid, "%s: Wait interrupted or timed out: %d\n",
			__func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
		rc = -EIO;
		goto exit;
	} else {
		core->state = VIDC_CORE_INIT_DONE;
		rc = 0;
	}
	s_vpr_h(sid, "SYS_INIT_DONE!!!\n");
exit:
	mutex_unlock(&core->lock);
	return rc;
}

/*
 * msm_comm_init_core_done() - wait for core init completion and advance
 * the instance state (definition continues in the next chunk).  On
 * failure a sys error is generated for the instance.
 */
static int msm_comm_init_core_done(struct msm_vidc_inst *inst)
{
	int rc = 0;

	rc = msm_comm_check_core_init(inst->core, inst->sid);
	if (rc) {
		d_vpr_e("%s: failed to initialize core\n", __func__);
		msm_comm_generate_sys_error(inst);
return rc; + } + change_inst_state(inst, MSM_VIDC_CORE_INIT_DONE); + return rc; +} + +static int msm_comm_init_core(struct msm_vidc_inst *inst) +{ + int rc, i; + struct hfi_device *hdev; + struct msm_vidc_core *core; + + if (!inst || !inst->core || !inst->core->device) + return -EINVAL; + + core = inst->core; + hdev = core->device; + mutex_lock(&core->lock); + if (core->state >= VIDC_CORE_INIT) { + s_vpr_h(inst->sid, "Video core: %d is already in state: %d\n", + core->id, core->state); + goto core_already_inited; + } + s_vpr_h(inst->sid, "%s: core %pK\n", __func__, core); + rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data); + if (rc) { + s_vpr_e(inst->sid, "Failed to init core, id = %d\n", + core->id); + goto fail_core_init; + } + + /* initialize core while firmware processing SYS_INIT cmd */ + core->state = VIDC_CORE_INIT; + core->smmu_fault_handled = false; + core->trigger_ssr = false; + core->resources.max_secure_inst_count = + core->resources.max_secure_inst_count ? + core->resources.max_secure_inst_count : + core->resources.max_inst_count; + s_vpr_h(inst->sid, "%s: codecs count %d, max inst count %d\n", + __func__, core->resources.codecs_count, + core->resources.max_inst_count); + if (!core->resources.codecs || !core->resources.codecs_count) { + s_vpr_e(inst->sid, "%s: invalid codecs\n", __func__); + rc = -EINVAL; + goto fail_core_init; + } + if (!core->capabilities) { + core->capabilities = kcalloc(core->resources.codecs_count, + sizeof(struct msm_vidc_capability), GFP_KERNEL); + if (!core->capabilities) { + s_vpr_e(inst->sid, + "%s: failed to allocate capabilities\n", + __func__); + rc = -ENOMEM; + goto fail_core_init; + } + } else { + s_vpr_e(inst->sid, + "%s: capabilities memory is expected to be freed\n", + __func__); + } + for (i = 0; i < core->resources.codecs_count; i++) { + core->capabilities[i].domain = + core->resources.codecs[i].domain; + core->capabilities[i].codec = + core->resources.codecs[i].codec; + } + rc = msm_vidc_capabilities(core); 
+ if (rc) { + s_vpr_e(inst->sid, + "%s: default capabilities failed\n", __func__); + kfree(core->capabilities); + core->capabilities = NULL; + goto fail_core_init; + } + s_vpr_h(inst->sid, "%s: done\n", __func__); +core_already_inited: + change_inst_state(inst, MSM_VIDC_CORE_INIT); + mutex_unlock(&core->lock); + + rc = msm_comm_scale_clocks_and_bus(inst, 1); + return rc; + +fail_core_init: + core->state = VIDC_CORE_UNINIT; + mutex_unlock(&core->lock); + return rc; +} + +static int msm_vidc_deinit_core(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + core = inst->core; + hdev = core->device; + + mutex_lock(&core->lock); + if (core->state == VIDC_CORE_UNINIT) { + s_vpr_h(inst->sid, "Video core: %d is already in state: %d\n", + core->id, core->state); + goto core_already_uninited; + } + mutex_unlock(&core->lock); + + msm_comm_scale_clocks_and_bus(inst, 1); + + mutex_lock(&core->lock); + + if (!core->resources.never_unload_fw) { + cancel_delayed_work(&core->fw_unload_work); + + /* + * Delay unloading of firmware. This is useful + * in avoiding firmware download delays in cases where we + * will have a burst of back to back video playback sessions + * e.g. thumbnail generation. + */ + schedule_delayed_work(&core->fw_unload_work, + msecs_to_jiffies(core->state == VIDC_CORE_INIT_DONE ? + core->resources.msm_vidc_firmware_unload_delay : 0)); + + s_vpr_h(inst->sid, "firmware unload delayed by %u ms\n", + core->state == VIDC_CORE_INIT_DONE ? 
+ core->resources.msm_vidc_firmware_unload_delay : 0); + } + +core_already_uninited: + change_inst_state(inst, MSM_VIDC_CORE_UNINIT); + mutex_unlock(&core->lock); + return 0; +} + +int msm_comm_force_cleanup(struct msm_vidc_inst *inst) +{ + msm_comm_kill_session(inst); + return msm_vidc_deinit_core(inst); +} + +static int msm_comm_session_init_done(int flipped_state, + struct msm_vidc_inst *inst) +{ + int rc; + + if (!inst) { + d_vpr_e("Invalid parameter %s\n", __func__); + return -EINVAL; + } + s_vpr_h(inst->sid, "waiting for session init done\n"); + rc = wait_for_state(inst, flipped_state, MSM_VIDC_OPEN_DONE, + HAL_SESSION_INIT_DONE); + if (rc) { + s_vpr_e(inst->sid, "Session init failed for inst %pK\n", inst); + msm_comm_generate_sys_error(inst); + return rc; + } + + return rc; +} + +static int msm_comm_session_init(int flipped_state, + struct msm_vidc_inst *inst) +{ + int rc = 0; + int fourcc = 0; + struct hfi_device *hdev; + struct v4l2_format *f; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + hdev = inst->core->device; + + if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_OPEN)) { + s_vpr_h(inst->sid, "inst: %pK is already in state: %d\n", + inst, inst->state); + goto exit; + } + if (inst->session_type == MSM_VIDC_DECODER) { + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + fourcc = f->fmt.pix_mp.pixelformat; + } else if (inst->session_type == MSM_VIDC_ENCODER) { + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + fourcc = f->fmt.pix_mp.pixelformat; + } else { + s_vpr_e(inst->sid, "Invalid session\n"); + return -EINVAL; + } + + rc = msm_comm_init_clocks_and_bus_data(inst); + if (rc) { + s_vpr_e(inst->sid, + "Failed to initialize clocks and bus data\n"); + goto exit; + } + + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data, + inst, get_hal_domain(inst->session_type, inst->sid), + get_hal_codec(fourcc, inst->sid), + &inst->session, 
inst->sid); + if (rc || !inst->session) { + s_vpr_e(inst->sid, + "Failed to call session init for: %pK, %pK, %d, %d\n", + inst->core->device, inst, + inst->session_type, fourcc); + rc = -EINVAL; + goto exit; + } + rc = msm_comm_update_capabilities(inst); + if (rc) { + s_vpr_e(inst->sid, "Failed to update capabilities\n"); + goto exit; + } + rc = msm_vidc_calculate_buffer_counts(inst); + if (rc) { + s_vpr_e(inst->sid, "Failed to initialize buff counts\n"); + goto exit; + } + change_inst_state(inst, MSM_VIDC_OPEN); + +exit: + return rc; +} + +int msm_comm_update_dpb_bufreqs(struct msm_vidc_inst *inst) +{ + struct hal_buffer_requirements *req = NULL; + struct msm_vidc_format *fmt; + struct v4l2_format *f; + u32 i, hfi_fmt, rc = 0; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + if (msm_comm_get_stream_output_mode(inst) != + HAL_VIDEO_DECODER_SECONDARY) + return 0; + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + if (inst->buff_req.buffer[i].buffer_type == HAL_BUFFER_OUTPUT) { + req = &inst->buff_req.buffer[i]; + break; + } + } + + if (!req) { + s_vpr_e(inst->sid, "%s: req not found\n", __func__); + return -EINVAL; + } + + fmt = &inst->fmts[OUTPUT_PORT]; + /* For DPB buffers, Always use min count */ + req->buffer_count_min = req->buffer_count_min_host = + req->buffer_count_actual = fmt->count_min; + + hfi_fmt = msm_comm_convert_color_fmt(inst->clk_data.dpb_fourcc, + inst->sid); + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + req->buffer_size = VENUS_BUFFER_SIZE(hfi_fmt, f->fmt.pix_mp.width, + f->fmt.pix_mp.height); + + return rc; +} + +static int msm_comm_get_dpb_bufreqs(struct msm_vidc_inst *inst, + struct hal_buffer_requirements *req) +{ + struct hal_buffer_requirements *dpb = NULL; + u32 i; + + if (!inst || !req) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + if (msm_comm_get_stream_output_mode(inst) != + HAL_VIDEO_DECODER_SECONDARY) + return 0; + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + if 
(inst->buff_req.buffer[i].buffer_type == HAL_BUFFER_OUTPUT) { + dpb = &inst->buff_req.buffer[i]; + break; + } + } + + if (!dpb) { + s_vpr_e(inst->sid, "%s: req not found\n", __func__); + return -EINVAL; + } + + memcpy(req, dpb, sizeof(struct hal_buffer_requirements)); + + return 0; +} + +static void msm_comm_print_mem_usage(struct msm_vidc_core *core) +{ + struct msm_vidc_inst *inst; + struct msm_vidc_format *inp_f, *out_f; + u32 dpb_cnt, dpb_size, i = 0, rc = 0; + struct v4l2_pix_format_mplane *iplane, *oplane; + u32 sz_i, sz_i_e, sz_o, sz_o_e, sz_s, sz_s1, sz_s2, sz_p, sz_p1, sz_r; + u32 cnt_i, cnt_o, cnt_s, cnt_s1, cnt_s2, cnt_p, cnt_p1, cnt_r; + u64 total; + + d_vpr_e("Running instances - mem breakup:\n"); + d_vpr_e( + "%4s|%4s|%24s|%24s|%24s|%24s|%24s|%10s|%10s|%10s|%10s|%10s|%10s|%10s\n", + "w", "h", "in", "extra_in", "out", "extra_out", + "out2", "scratch", "scratch_1", "scratch_2", + "persist", "persist_1", "recon", "total_kb"); + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + dpb_cnt = dpb_size = total = 0; + sz_s = sz_s1 = sz_s2 = sz_p = sz_p1 = sz_r = 0; + cnt_s = cnt_s1 = cnt_s2 = cnt_p = cnt_p1 = cnt_r = 0; + + inp_f = &inst->fmts[INPUT_PORT]; + out_f = &inst->fmts[OUTPUT_PORT]; + iplane = &inp_f->v4l2_fmt.fmt.pix_mp; + oplane = &out_f->v4l2_fmt.fmt.pix_mp; + + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_SECONDARY) { + struct hal_buffer_requirements dpb = {0}; + + rc = msm_comm_get_dpb_bufreqs(inst, &dpb); + if (rc) { + s_vpr_e(inst->sid, + "%s: get dpb bufreq failed\n", + __func__); + goto error; + } + dpb_cnt = dpb.buffer_count_actual; + dpb_size = dpb.buffer_size; + } + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements *req; + + req = &inst->buff_req.buffer[i]; + switch (req->buffer_type) { + case HAL_BUFFER_INTERNAL_SCRATCH: + sz_s = req->buffer_size; + cnt_s = req->buffer_count_actual; + break; + case HAL_BUFFER_INTERNAL_SCRATCH_1: + sz_s1 = req->buffer_size; + cnt_s1 = 
req->buffer_count_actual; + break; + case HAL_BUFFER_INTERNAL_SCRATCH_2: + sz_s2 = req->buffer_size; + cnt_s2 = req->buffer_count_actual; + break; + case HAL_BUFFER_INTERNAL_PERSIST: + sz_p = req->buffer_size; + cnt_p = req->buffer_count_actual; + break; + case HAL_BUFFER_INTERNAL_PERSIST_1: + sz_p1 = req->buffer_size; + cnt_p1 = req->buffer_count_actual; + break; + case HAL_BUFFER_INTERNAL_RECON: + sz_r = req->buffer_size; + cnt_r = req->buffer_count_actual; + break; + default: + break; + } + } + if (is_decode_session(inst)) + sz_i = msm_vidc_calculate_dec_input_frame_size(inst, 0); + else + sz_i = iplane->plane_fmt[0].sizeimage; + sz_i_e = iplane->plane_fmt[1].sizeimage; + cnt_i = inp_f->count_min_host; + + sz_o = oplane->plane_fmt[0].sizeimage; + sz_o_e = oplane->plane_fmt[1].sizeimage; + cnt_o = out_f->count_min_host; + + total = sz_i * cnt_i + sz_i_e * cnt_i + sz_o * cnt_o + + sz_o_e * cnt_o + dpb_cnt * dpb_size + sz_s * cnt_s + + sz_s1 * cnt_s1 + sz_s2 * cnt_s2 + sz_p * cnt_p + + sz_p1 * cnt_p1 + sz_r * cnt_r; + total = total >> 10; + + s_vpr_e(inst->sid, + "%4d|%4d|%11u(%8ux%2u)|%11u(%8ux%2u)|%11u(%8ux%2u)|%11u(%8ux%2u)|%11u(%8ux%2u)|%10u|%10u|%10u|%10u|%10u|%10u|%10llu\n", + max(iplane->width, oplane->width), + max(iplane->height, oplane->height), + sz_i * cnt_i, sz_i, cnt_i, + sz_i_e * cnt_i, sz_i_e, cnt_i, + sz_o * cnt_o, sz_o, cnt_o, + sz_o_e * cnt_o, sz_o_e, cnt_o, + dpb_size * dpb_cnt, dpb_size, dpb_cnt, + sz_s * cnt_s, sz_s1 * cnt_s1, + sz_s2 * cnt_s2, sz_p * cnt_p, sz_p1 * cnt_p1, + sz_r * cnt_r, total); + } +error: + mutex_unlock(&core->lock); + +} + +static void msm_vidc_print_running_insts(struct msm_vidc_core *core) +{ + struct msm_vidc_inst *temp; + int op_rate = 0; + struct v4l2_format *out_f; + struct v4l2_format *inp_f; + + d_vpr_e("Running instances:\n"); + d_vpr_e("%4s|%4s|%4s|%4s|%4s|%4s\n", + "type", "w", "h", "fps", "opr", "prop"); + + mutex_lock(&core->lock); + list_for_each_entry(temp, &core->instances, list) { + out_f = 
&temp->fmts[OUTPUT_PORT].v4l2_fmt; + inp_f = &temp->fmts[INPUT_PORT].v4l2_fmt; + if (temp->state >= MSM_VIDC_OPEN_DONE && + temp->state < MSM_VIDC_STOP_DONE) { + char properties[5] = ""; + + if (is_thumbnail_session(temp)) + strlcat(properties, "N", sizeof(properties)); + + if (is_turbo_session(temp)) + strlcat(properties, "T", sizeof(properties)); + + if (is_realtime_session(temp)) + strlcat(properties, "R", sizeof(properties)); + + if (is_grid_session(temp)) + strlcat(properties, "I", sizeof(properties)); + + if (temp->clk_data.operating_rate) + op_rate = temp->clk_data.operating_rate >> 16; + else + op_rate = temp->clk_data.frame_rate >> 16; + + s_vpr_e(temp->sid, "%4d|%4d|%4d|%4d|%4d|%4s\n", + temp->session_type, + max(out_f->fmt.pix_mp.width, + inp_f->fmt.pix_mp.width), + max(out_f->fmt.pix_mp.height, + inp_f->fmt.pix_mp.height), + temp->clk_data.frame_rate >> 16, + op_rate, properties); + } + } + mutex_unlock(&core->lock); +} + +static int msm_vidc_load_resources(int flipped_state, + struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct msm_vidc_core *core; + int max_video_load = 0, max_image_load = 0; + int video_load = 0, image_load = 0; + enum load_calc_quirks quirks = LOAD_ADMISSION_CONTROL; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst %pK is in invalid state\n", + __func__, inst); + return -EINVAL; + } + if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) { + s_vpr_h(inst->sid, "inst: %pK is already in state: %d\n", + inst, inst->state); + goto exit; + } + core = inst->core; + + image_load = msm_comm_get_device_load(core, + MSM_VIDC_ENCODER, MSM_VIDC_IMAGE, + quirks); + video_load = msm_comm_get_device_load(core, + MSM_VIDC_DECODER, MSM_VIDC_VIDEO, + quirks); + video_load += msm_comm_get_device_load(core, + MSM_VIDC_ENCODER, MSM_VIDC_VIDEO, + quirks); + + 
max_video_load = inst->core->resources.max_load + + inst->capability.cap[CAP_MBS_PER_FRAME].max; + max_image_load = inst->core->resources.max_image_load; + + if (video_load > max_video_load) { + s_vpr_e(inst->sid, + "H/W is overloaded. needed: %d max: %d\n", + video_load, max_video_load); + msm_vidc_print_running_insts(inst->core); + return -EBUSY; + } + + if (video_load + image_load > max_video_load + max_image_load) { + s_vpr_e(inst->sid, + "H/W is overloaded. needed: [video + image][%d + %d], max: [video + image][%d + %d]\n", + video_load, image_load, max_video_load, max_image_load); + msm_vidc_print_running_insts(inst->core); + return -EBUSY; + } + + hdev = core->device; + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + rc = call_hfi_op(hdev, session_load_res, (void *) inst->session); + if (rc) { + s_vpr_e(inst->sid, "Failed to send load resources\n"); + goto exit; + } + change_inst_state(inst, MSM_VIDC_LOAD_RESOURCES); +exit: + return rc; +} + +static int msm_vidc_start(int flipped_state, struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst %pK is in invalid\n", + __func__, inst); + return -EINVAL; + } + if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_START)) { + s_vpr_h(inst->sid, "inst: %pK is already in state: %d\n", + inst, inst->state); + goto exit; + } + hdev = inst->core->device; + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + rc = call_hfi_op(hdev, session_start, (void *) inst->session); + if (rc) { + s_vpr_e(inst->sid, "Failed to send start\n"); + goto exit; + } + change_inst_state(inst, MSM_VIDC_START); +exit: + return rc; +} + +static int msm_vidc_stop(int flipped_state, struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: 
invalid parameters\n", __func__); + return -EINVAL; + } + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst %pK is in invalid state\n", + __func__, inst); + return -EINVAL; + } + if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_STOP)) { + s_vpr_h(inst->sid, "inst: %pK is already in state: %d\n", + inst, inst->state); + goto exit; + } + hdev = inst->core->device; + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + rc = call_hfi_op(hdev, session_stop, (void *) inst->session); + if (rc) { + s_vpr_e(inst->sid, "%s: inst %pK session_stop failed\n", + __func__, inst); + goto exit; + } + change_inst_state(inst, MSM_VIDC_STOP); +exit: + return rc; +} + +static int msm_vidc_release_res(int flipped_state, struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst %pK is in invalid state\n", + __func__, inst); + return -EINVAL; + } + if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_RELEASE_RESOURCES)) { + s_vpr_h(inst->sid, "inst: %pK is already in state: %d\n", + inst, inst->state); + goto exit; + } + hdev = inst->core->device; + s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst); + rc = call_hfi_op(hdev, session_release_res, (void *) inst->session); + if (rc) { + s_vpr_e(inst->sid, "Failed to send release resources\n"); + goto exit; + } + change_inst_state(inst, MSM_VIDC_RELEASE_RESOURCES); +exit: + return rc; +} + +static int msm_comm_session_close(int flipped_state, + struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_CLOSE)) { + s_vpr_h(inst->sid, "inst: %pK is already in state: %d\n", + inst, inst->state); + goto exit; + } + 
hdev = inst->core->device; /* tail of msm_comm_session_close() */
	s_vpr_h(inst->sid, "%s: inst %pK\n", __func__, inst);
	rc = call_hfi_op(hdev, session_end, (void *) inst->session);
	if (rc) {
		s_vpr_e(inst->sid, "Failed to send close\n");
		goto exit;
	}
	change_inst_state(inst, MSM_VIDC_CLOSE);
exit:
	return rc;
}

/*
 * msm_comm_suspend() - forward a suspend request to the HFI layer for the
 * core identified by @core_id.
 *
 * Returns -EINVAL when the core or its device handle cannot be found,
 * otherwise the HFI suspend op's return code.
 */
int msm_comm_suspend(int core_id)
{
	struct hfi_device *hdev;
	struct msm_vidc_core *core;
	int rc = 0;

	core = get_vidc_core(core_id);
	if (!core) {
		d_vpr_e("%s: Failed to find core for core_id = %d\n",
			__func__, core_id);
		return -EINVAL;
	}

	hdev = (struct hfi_device *)core->device;
	if (!hdev) {
		d_vpr_e("%s: Invalid device handle\n", __func__);
		return -EINVAL;
	}

	rc = call_hfi_op(hdev, suspend, hdev->hfi_device_data);
	if (rc)
		d_vpr_e("Failed to suspend\n");

	return rc;
}

/*
 * get_flipped_state() - reflect @present_state around MSM_VIDC_STOP when
 * the desired transition crosses the STOP boundary in either direction.
 *
 * The "&= 0xFFFE; -= 1" pair snaps the reflected value onto the odd state
 * number just below it.  NOTE(review): this appears to assume the state
 * enum is laid out in (command, command_DONE) pairs -- confirm against
 * the enum definition before relying on it.
 */
static int get_flipped_state(int present_state,
	int desired_state)
{
	int flipped_state = present_state;

	if (flipped_state < MSM_VIDC_STOP
		&& desired_state > MSM_VIDC_STOP) {
		flipped_state = MSM_VIDC_STOP + (MSM_VIDC_STOP - flipped_state);
		flipped_state &= 0xFFFE;
		flipped_state = flipped_state - 1;
	} else if (flipped_state > MSM_VIDC_STOP
		&& desired_state < MSM_VIDC_STOP) {
		flipped_state = MSM_VIDC_STOP -
			(flipped_state - MSM_VIDC_STOP + 1);
		flipped_state &= 0xFFFE;
		flipped_state = flipped_state - 1;
	}
	return flipped_state;
}

/*
 * msm_comm_reset_bufreqs() - zero out the size, counts and alignment of
 * the buffer-requirement entry matching @buf_type.
 *
 * Returns -EINVAL when @inst is NULL or no entry of that type exists.
 */
int msm_comm_reset_bufreqs(struct msm_vidc_inst *inst, enum hal_buffer buf_type)
{
	struct hal_buffer_requirements *bufreqs;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	bufreqs = get_buff_req_buffer(inst, buf_type);
	if (!bufreqs) {
		s_vpr_e(inst->sid, "%s: invalid buf type %d\n",
			__func__, buf_type);
		return -EINVAL;
	}
	bufreqs->buffer_size = bufreqs->buffer_count_min =
		bufreqs->buffer_count_min_host = bufreqs->buffer_count_actual =
		bufreqs->buffer_alignment = 0;

	return 0;
}

/*
 * get_buff_req_buffer() - look up the hal_buffer_requirements entry for a
 * buffer type (definition continues in the next chunk).
 */
struct hal_buffer_requirements *get_buff_req_buffer(
	struct
msm_vidc_inst *inst, enum hal_buffer buffer_type) +{ + int i; + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + if (inst->buff_req.buffer[i].buffer_type == buffer_type) + return &inst->buff_req.buffer[i]; + } + s_vpr_e(inst->sid, "Failed to get buff req for : %x", buffer_type); + return NULL; +} + +u32 msm_comm_convert_color_fmt(u32 v4l2_fmt, u32 sid) +{ + switch (v4l2_fmt) { + case V4L2_PIX_FMT_NV12: + return COLOR_FMT_NV12; + case V4L2_PIX_FMT_NV21: + return COLOR_FMT_NV21; + case V4L2_PIX_FMT_NV12_128: + return COLOR_FMT_NV12_128; + case V4L2_PIX_FMT_NV12_512: + return COLOR_FMT_NV12_512; + case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS: + return COLOR_FMT_P010; + case V4L2_PIX_FMT_NV12_UBWC: + return COLOR_FMT_NV12_UBWC; + case V4L2_PIX_FMT_NV12_TP10_UBWC: + return COLOR_FMT_NV12_BPP10_UBWC; + case V4L2_PIX_FMT_RGBA8888_UBWC: + return COLOR_FMT_RGBA8888_UBWC; + default: + s_vpr_e(sid, + "Invalid v4l2 color fmt FMT : %x, Set default(NV12)", + v4l2_fmt); + return COLOR_FMT_NV12; + } +} + +static u32 get_hfi_buffer(int hal_buffer, u32 sid) +{ + u32 buffer; + + switch (hal_buffer) { + case HAL_BUFFER_INPUT: + buffer = HFI_BUFFER_INPUT; + break; + case HAL_BUFFER_OUTPUT: + buffer = HFI_BUFFER_OUTPUT; + break; + case HAL_BUFFER_OUTPUT2: + buffer = HFI_BUFFER_OUTPUT2; + break; + case HAL_BUFFER_EXTRADATA_INPUT: + buffer = HFI_BUFFER_EXTRADATA_INPUT; + break; + case HAL_BUFFER_EXTRADATA_OUTPUT: + buffer = HFI_BUFFER_EXTRADATA_OUTPUT; + break; + case HAL_BUFFER_EXTRADATA_OUTPUT2: + buffer = HFI_BUFFER_EXTRADATA_OUTPUT2; + break; + case HAL_BUFFER_INTERNAL_SCRATCH: + buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH; + break; + case HAL_BUFFER_INTERNAL_SCRATCH_1: + buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1; + break; + case HAL_BUFFER_INTERNAL_SCRATCH_2: + buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2; + break; + case HAL_BUFFER_INTERNAL_PERSIST: + buffer = HFI_BUFFER_INTERNAL_PERSIST; + break; + case HAL_BUFFER_INTERNAL_PERSIST_1: + buffer = HFI_BUFFER_INTERNAL_PERSIST_1; + 
break; + default: + s_vpr_e(sid, "Invalid buffer: %#x\n", hal_buffer); + buffer = 0; + break; + } + return buffer; +} + +static int set_dpb_only_buffers(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type) +{ + int rc = 0; + struct internal_buf *binfo = NULL; + u32 smem_flags = SMEM_UNCACHED, buffer_size = 0, num_buffers = 0; + unsigned int i; + struct hfi_device *hdev; + struct hfi_buffer_size_minimum b; + struct v4l2_format *f; + struct hal_buffer_requirements dpb = {0}; + + hdev = inst->core->device; + + rc = msm_comm_get_dpb_bufreqs(inst, &dpb); + if (rc) { + s_vpr_e(inst->sid, "Couldn't retrieve dpb count & size\n"); + return -EINVAL; + } + num_buffers = dpb.buffer_count_actual; + buffer_size = dpb.buffer_size; + s_vpr_h(inst->sid, "dpb: cnt = %d, size = %d\n", + num_buffers, buffer_size); + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + + b.buffer_type = get_hfi_buffer(buffer_type, inst->sid); + if (!b.buffer_type) + return -EINVAL; + b.buffer_size = buffer_size; + rc = call_hfi_op(hdev, session_set_property, + inst->session, HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM, + &b, sizeof(b)); + + if (f->fmt.pix_mp.num_planes == 1 || + !f->fmt.pix_mp.plane_fmt[1].sizeimage) { + s_vpr_h(inst->sid, + "This extradata buffer not required, buffer_type: %x\n", + buffer_type); + } else { + s_vpr_h(inst->sid, "extradata: num = 1, size = %d\n", + f->fmt.pix_mp.plane_fmt[1].sizeimage); + inst->dpb_extra_binfo = NULL; + inst->dpb_extra_binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); + if (!inst->dpb_extra_binfo) { + s_vpr_e(inst->sid, "%s: Out of memory\n", __func__); + rc = -ENOMEM; + goto fail_kzalloc; + } + rc = msm_comm_smem_alloc(inst, + f->fmt.pix_mp.plane_fmt[1].sizeimage, 1, smem_flags, + buffer_type, 0, &inst->dpb_extra_binfo->smem); + if (rc) { + s_vpr_e(inst->sid, + "Failed to allocate output memory\n"); + goto err_no_mem; + } + } + + if (inst->flags & VIDC_SECURE) + smem_flags |= SMEM_SECURE; + + if (buffer_size) { + for (i = 0; i < num_buffers; i++) { + binfo = 
kzalloc(sizeof(*binfo), GFP_KERNEL); + if (!binfo) { + s_vpr_e(inst->sid, "Out of memory\n"); + rc = -ENOMEM; + goto fail_kzalloc; + } + rc = msm_comm_smem_alloc(inst, + buffer_size, 1, smem_flags, + buffer_type, 0, &binfo->smem); + if (rc) { + s_vpr_e(inst->sid, + "Failed to allocate output memory\n"); + goto err_no_mem; + } + binfo->buffer_type = buffer_type; + binfo->buffer_ownership = DRIVER; + s_vpr_h(inst->sid, "Output buffer address: %#x\n", + binfo->smem.device_addr); + + if (inst->buffer_mode_set[OUTPUT_PORT] == + HAL_BUFFER_MODE_STATIC) { + struct vidc_buffer_addr_info buffer_info = {0}; + + buffer_info.buffer_size = buffer_size; + buffer_info.buffer_type = buffer_type; + buffer_info.num_buffers = 1; + buffer_info.align_device_addr = + binfo->smem.device_addr; + buffer_info.extradata_addr = + inst->dpb_extra_binfo->smem.device_addr; + buffer_info.extradata_size = + inst->dpb_extra_binfo->smem.size; + rc = call_hfi_op(hdev, session_set_buffers, + (void *) inst->session, &buffer_info); + if (rc) { + s_vpr_e(inst->sid, + "%s: session_set_buffers failed\n", + __func__); + goto fail_set_buffers; + } + } + mutex_lock(&inst->outputbufs.lock); + list_add_tail(&binfo->list, &inst->outputbufs.list); + mutex_unlock(&inst->outputbufs.lock); + } + } + return rc; +fail_set_buffers: + msm_comm_smem_free(inst, &binfo->smem); +err_no_mem: + kfree(binfo); +fail_kzalloc: + return rc; +} + +static inline char *get_buffer_name(enum hal_buffer buffer_type) +{ + switch (buffer_type) { + case HAL_BUFFER_INPUT: return "input"; + case HAL_BUFFER_OUTPUT: return "output"; + case HAL_BUFFER_OUTPUT2: return "output_2"; + case HAL_BUFFER_EXTRADATA_INPUT: return "input_extra"; + case HAL_BUFFER_EXTRADATA_OUTPUT: return "output_extra"; + case HAL_BUFFER_EXTRADATA_OUTPUT2: return "output2_extra"; + case HAL_BUFFER_INTERNAL_SCRATCH: return "scratch"; + case HAL_BUFFER_INTERNAL_SCRATCH_1: return "scratch_1"; + case HAL_BUFFER_INTERNAL_SCRATCH_2: return "scratch_2"; + case 
HAL_BUFFER_INTERNAL_PERSIST: return "persist"; + case HAL_BUFFER_INTERNAL_PERSIST_1: return "persist_1"; + case HAL_BUFFER_INTERNAL_CMD_QUEUE: return "queue"; + default: return "????"; + } +} + +static int set_internal_buf_on_fw(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, + struct msm_smem *handle, bool reuse) +{ + struct vidc_buffer_addr_info buffer_info; + struct hfi_device *hdev; + int rc = 0; + + if (!inst || !inst->core || !inst->core->device || !handle) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, handle); + return -EINVAL; + } + + hdev = inst->core->device; + + buffer_info.buffer_size = handle->size; + buffer_info.buffer_type = buffer_type; + buffer_info.num_buffers = 1; + buffer_info.align_device_addr = handle->device_addr; + s_vpr_h(inst->sid, "%s %s buffer : %x\n", + reuse ? "Reusing" : "Allocated", + get_buffer_name(buffer_type), + buffer_info.align_device_addr); + + rc = call_hfi_op(hdev, session_set_buffers, + (void *) inst->session, &buffer_info); + if (rc) { + s_vpr_e(inst->sid, "vidc_hal_session_set_buffers failed\n"); + return rc; + } + return 0; +} + +static bool reuse_internal_buffers(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, struct msm_vidc_list *buf_list) +{ + struct internal_buf *buf; + int rc = 0; + bool reused = false; + + if (!inst || !buf_list) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, buf_list); + return false; + } + + mutex_lock(&buf_list->lock); + list_for_each_entry(buf, &buf_list->list, list) { + if (buf->buffer_type != buffer_type) + continue; + + /* + * Persist buffer size won't change with resolution. If they + * are in queue means that they are already allocated and + * given to HW. HW can use them without reallocation. These + * buffers are not released as part of port reconfig. So + * driver no need to set them again. 
+ */ + + if (buffer_type != HAL_BUFFER_INTERNAL_PERSIST + && buffer_type != HAL_BUFFER_INTERNAL_PERSIST_1) { + + rc = set_internal_buf_on_fw(inst, buffer_type, + &buf->smem, true); + if (rc) { + s_vpr_e(inst->sid, + "%s: session_set_buffers failed\n", + __func__); + reused = false; + break; + } + } + reused = true; + s_vpr_h(inst->sid, + "Re-using internal buffer type : %d\n", buffer_type); + } + mutex_unlock(&buf_list->lock); + return reused; +} + +static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, + struct hal_buffer_requirements *internal_bufreq, + struct msm_vidc_list *buf_list) +{ + struct internal_buf *binfo; + u32 smem_flags = SMEM_UNCACHED; + int rc = 0; + unsigned int i = 0; + + if (!inst || !internal_bufreq || !buf_list) + return -EINVAL; + + if (!internal_bufreq->buffer_size) + return 0; + + if (inst->flags & VIDC_SECURE) + smem_flags |= SMEM_SECURE; + + for (i = 0; i < internal_bufreq->buffer_count_actual; i++) { + binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); + if (!binfo) { + s_vpr_e(inst->sid, "%s: Out of memory\n", __func__); + rc = -ENOMEM; + goto fail_kzalloc; + } + rc = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size, + 1, smem_flags, internal_bufreq->buffer_type, + 0, &binfo->smem); + if (rc) { + s_vpr_e(inst->sid, + "Failed to allocate scratch memory\n"); + goto err_no_mem; + } + + binfo->buffer_type = internal_bufreq->buffer_type; + + rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type, + &binfo->smem, false); + if (rc) + goto fail_set_buffers; + + mutex_lock(&buf_list->lock); + list_add_tail(&binfo->list, &buf_list->list); + mutex_unlock(&buf_list->lock); + } + return rc; + +fail_set_buffers: + msm_comm_smem_free(inst, &binfo->smem); +err_no_mem: + kfree(binfo); +fail_kzalloc: + return rc; + +} + +static int set_internal_buffers(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, struct msm_vidc_list *buf_list) +{ + struct hal_buffer_requirements *internal_buf; + + internal_buf = 
get_buff_req_buffer(inst, buffer_type); + if (!internal_buf) { + s_vpr_h(inst->sid, + "This internal buffer not required, buffer_type: %x\n", + buffer_type); + return 0; + } + + s_vpr_h(inst->sid, "Buffer type %s: num = %d, size = %d\n", + get_buffer_name(buffer_type), + internal_buf->buffer_count_actual, internal_buf->buffer_size); + + /* + * Try reusing existing internal buffers first. + * If it's not possible to reuse, allocate new buffers. + */ + if (reuse_internal_buffers(inst, buffer_type, buf_list)) + return 0; + + return allocate_and_set_internal_bufs(inst, internal_buf, + buf_list); +} + +int msm_comm_try_state(struct msm_vidc_inst *inst, int state) +{ + int rc = 0; + int flipped_state; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + s_vpr_h(inst->sid, "Trying to move inst: %pK from: %#x to %#x\n", + inst, inst->state, state); + + mutex_lock(&inst->sync_lock); + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst %pK is in invalid\n", + __func__, inst); + rc = -EINVAL; + goto exit; + } + + flipped_state = get_flipped_state(inst->state, state); + s_vpr_h(inst->sid, "inst: %pK flipped_state = %#x\n", + inst, flipped_state); + switch (flipped_state) { + case MSM_VIDC_CORE_UNINIT_DONE: + case MSM_VIDC_CORE_INIT: + rc = msm_comm_init_core(inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_CORE_INIT_DONE: + rc = msm_comm_init_core_done(inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_OPEN: + rc = msm_comm_session_init(flipped_state, inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_OPEN_DONE: + rc = msm_comm_session_init_done(flipped_state, inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_LOAD_RESOURCES: + rc = msm_vidc_load_resources(flipped_state, inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case 
MSM_VIDC_LOAD_RESOURCES_DONE: + case MSM_VIDC_START: + rc = msm_vidc_start(flipped_state, inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_START_DONE: + rc = wait_for_state(inst, flipped_state, MSM_VIDC_START_DONE, + HAL_SESSION_START_DONE); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_STOP: + rc = msm_vidc_stop(flipped_state, inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_STOP_DONE: + rc = wait_for_state(inst, flipped_state, MSM_VIDC_STOP_DONE, + HAL_SESSION_STOP_DONE); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + s_vpr_h(inst->sid, "Moving to Stop Done state\n"); + case MSM_VIDC_RELEASE_RESOURCES: + rc = msm_vidc_release_res(flipped_state, inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_RELEASE_RESOURCES_DONE: + rc = wait_for_state(inst, flipped_state, + MSM_VIDC_RELEASE_RESOURCES_DONE, + HAL_SESSION_RELEASE_RESOURCE_DONE); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + s_vpr_h(inst->sid, "Moving to release resources done state\n"); + case MSM_VIDC_CLOSE: + rc = msm_comm_session_close(flipped_state, inst); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + case MSM_VIDC_CLOSE_DONE: + rc = wait_for_state(inst, flipped_state, MSM_VIDC_CLOSE_DONE, + HAL_SESSION_END_DONE); + if (rc || state <= get_flipped_state(inst->state, state)) + break; + msm_comm_session_clean(inst); + case MSM_VIDC_CORE_UNINIT: + case MSM_VIDC_CORE_INVALID: + s_vpr_h(inst->sid, "Sending core uninit\n"); + rc = msm_vidc_deinit_core(inst); + if (rc || state == get_flipped_state(inst->state, state)) + break; + default: + s_vpr_e(inst->sid, "State not recognized\n"); + rc = -EINVAL; + break; + } + +exit: + mutex_unlock(&inst->sync_lock); + + if (rc) { + s_vpr_e(inst->sid, "Failed to move from state: %d to %d\n", + inst->state, state); + 
msm_comm_kill_session(inst); + } else { + trace_msm_vidc_common_state_change((void *)inst, + inst->state, state); + } + return rc; +} + +int msm_vidc_send_pending_eos_buffers(struct msm_vidc_inst *inst) +{ + struct vidc_frame_data data = {0}; + struct hfi_device *hdev; + struct eos_buf *binfo = NULL, *temp = NULL; + int rc = 0; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + + mutex_lock(&inst->eosbufs.lock); + list_for_each_entry_safe(binfo, temp, &inst->eosbufs.list, list) { + if (binfo->is_queued) + continue; + + data.alloc_len = binfo->smem.size; + data.device_addr = binfo->smem.device_addr; + data.input_tag = 0; + data.buffer_type = HAL_BUFFER_INPUT; + data.filled_len = 0; + data.offset = 0; + data.flags = HAL_BUFFERFLAG_EOS; + data.timestamp = 0; + data.extradata_addr = 0; + data.extradata_size = 0; + s_vpr_h(inst->sid, "Queueing EOS buffer 0x%x\n", + data.device_addr); + hdev = inst->core->device; + + rc = call_hfi_op(hdev, session_etb, inst->session, + &data); + binfo->is_queued = 1; + } + mutex_unlock(&inst->eosbufs.lock); + + return rc; +} + +int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd) +{ + struct msm_vidc_inst *inst = instance; + struct v4l2_decoder_cmd *dec = NULL; + struct v4l2_encoder_cmd *enc = NULL; + struct msm_vidc_core *core; + int which_cmd = 0, flags = 0, rc = 0; + + if (!inst || !inst->core || !cmd) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, cmd); + return -EINVAL; + } + core = inst->core; + if (inst->session_type == MSM_VIDC_ENCODER) { + enc = (struct v4l2_encoder_cmd *)cmd; + which_cmd = enc->cmd; + flags = enc->flags; + } else if (inst->session_type == MSM_VIDC_DECODER) { + dec = (struct v4l2_decoder_cmd *)cmd; + which_cmd = dec->cmd; + flags = dec->flags; + } + + + switch (which_cmd) { + case V4L2_CMD_FLUSH: + rc = msm_comm_flush(inst, flags); + if (rc) { + s_vpr_e(inst->sid, "Failed to flush buffers: %d\n", rc); + } + 
break; + /* This case also for V4L2_ENC_CMD_STOP */ + case V4L2_DEC_CMD_STOP: + { + struct eos_buf *binfo = NULL; + u32 smem_flags = SMEM_UNCACHED; + + if (inst->state != MSM_VIDC_START_DONE) { + s_vpr_h(inst->sid, + "Inst = %pK is not ready for EOS\n", inst); + break; + } + + binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); + if (!binfo) { + s_vpr_e(inst->sid, "%s: Out of memory\n", __func__); + rc = -ENOMEM; + break; + } + + if (inst->flags & VIDC_SECURE) + smem_flags |= SMEM_SECURE; + + rc = msm_comm_smem_alloc(inst, + SZ_4K, 1, smem_flags, + HAL_BUFFER_INPUT, 0, &binfo->smem); + if (rc) { + kfree(binfo); + s_vpr_e(inst->sid, + "Failed to allocate output memory\n"); + rc = -ENOMEM; + break; + } + + mutex_lock(&inst->eosbufs.lock); + list_add_tail(&binfo->list, &inst->eosbufs.list); + mutex_unlock(&inst->eosbufs.lock); + + rc = msm_vidc_send_pending_eos_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, + "Failed pending_eos_buffers sending\n"); + list_del(&binfo->list); + kfree(binfo); + break; + } + break; + } + default: + s_vpr_e(inst->sid, "Unknown Command %d\n", which_cmd); + rc = -ENOTSUPP; + break; + } + return rc; +} + +static void populate_frame_data(struct vidc_frame_data *data, + struct msm_vidc_buffer *mbuf, struct msm_vidc_inst *inst) +{ + u64 time_usec; + struct v4l2_format *f = NULL; + struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vbuf; + u32 itag = 0, itag2 = 0; + + if (!inst || !mbuf || !data) { + d_vpr_e("%s: invalid params %pK %pK %pK\n", + __func__, inst, mbuf, data); + return; + } + + vb = &mbuf->vvb.vb2_buf; + vbuf = to_vb2_v4l2_buffer(vb); + + time_usec = vb->timestamp; + do_div(time_usec, NSEC_PER_USEC); + + data->alloc_len = vb->planes[0].length; + data->device_addr = mbuf->smem[0].device_addr; + data->timestamp = time_usec; + data->flags = 0; + data->input_tag = 0; + + if (vb->type == INPUT_MPLANE) { + data->buffer_type = HAL_BUFFER_INPUT; + data->filled_len = vb->planes[0].bytesused; + data->offset = vb->planes[0].data_offset; + + if 
(vbuf->flags & V4L2_BUF_FLAG_EOS) + data->flags |= HAL_BUFFERFLAG_EOS; + + if (vbuf->flags & V4L2_BUF_FLAG_CODECCONFIG) + data->flags |= HAL_BUFFERFLAG_CODECCONFIG; + + if(msm_vidc_cvp_usage && (vbuf->flags & V4L2_BUF_FLAG_CVPMETADATA_SKIP)) + data->flags |= HAL_BUFFERFLAG_CVPMETADATA_SKIP; + + msm_comm_fetch_input_tag(&inst->etb_data, vb->index, + &itag, &itag2, inst->sid); + data->input_tag = itag; + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + } else if (vb->type == OUTPUT_MPLANE) { + data->buffer_type = msm_comm_get_hal_output_buffer(inst); + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + } + + if (f && f->fmt.pix_mp.num_planes > 1) { + data->extradata_addr = mbuf->smem[1].device_addr; + data->extradata_size = vb->planes[1].length; + data->flags |= HAL_BUFFERFLAG_EXTRADATA; + } +} + +enum hal_buffer get_hal_buffer_type(unsigned int type, + unsigned int plane_num) +{ + if (type == INPUT_MPLANE) { + if (plane_num == 0) + return HAL_BUFFER_INPUT; + else + return HAL_BUFFER_EXTRADATA_INPUT; + } else if (type == OUTPUT_MPLANE) { + if (plane_num == 0) + return HAL_BUFFER_OUTPUT; + else + return HAL_BUFFER_EXTRADATA_OUTPUT; + } else { + return -EINVAL; + } +} + +int msm_comm_num_queued_bufs(struct msm_vidc_inst *inst, u32 type) +{ + int count = 0; + struct msm_vidc_buffer *mbuf; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return 0; + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (mbuf->vvb.vb2_buf.type != type) + continue; + if (!(mbuf->flags & MSM_VIDC_FLAG_QUEUED)) + continue; + count++; + } + mutex_unlock(&inst->registeredbufs.lock); + + return count; +} + +static int num_pending_qbufs(struct msm_vidc_inst *inst, u32 type) +{ + int count = 0; + struct msm_vidc_buffer *mbuf; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return 0; + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (mbuf->vvb.vb2_buf.type 
!= type) + continue; + /* Count only deferred buffers */ + if (!(mbuf->flags & MSM_VIDC_FLAG_DEFERRED)) + continue; + count++; + } + mutex_unlock(&inst->registeredbufs.lock); + + return count; +} + +static int msm_comm_qbuf_to_hfi(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + struct hfi_device *hdev; + enum msm_vidc_debugfs_event e; + struct vidc_frame_data frame_data = {0}; + + if (!inst || !inst->core || !inst->core->device || !mbuf) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + hdev = inst->core->device; + + populate_frame_data(&frame_data, mbuf, inst); + /* mbuf is not deferred anymore */ + mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED; + + if (mbuf->vvb.vb2_buf.type == INPUT_MPLANE) { + e = MSM_VIDC_DEBUGFS_EVENT_ETB; + rc = call_hfi_op(hdev, session_etb, inst->session, &frame_data); + } else if (mbuf->vvb.vb2_buf.type == OUTPUT_MPLANE) { + e = MSM_VIDC_DEBUGFS_EVENT_FTB; + rc = call_hfi_op(hdev, session_ftb, inst->session, &frame_data); + } else { + s_vpr_e(inst->sid, "%s: invalid qbuf type %d:\n", __func__, + mbuf->vvb.vb2_buf.type); + rc = -EINVAL; + } + if (rc) { + mbuf->flags |= MSM_VIDC_FLAG_DEFERRED; + s_vpr_e(inst->sid, "%s: Failed to qbuf: %d\n", __func__, rc); + goto err_bad_input; + } + mbuf->flags |= MSM_VIDC_FLAG_QUEUED; + msm_vidc_debugfs_update(inst, e); + + if (mbuf->vvb.vb2_buf.type == INPUT_MPLANE && + is_decode_session(inst)) + rc = msm_comm_check_window_bitrate(inst, &frame_data); + +err_bad_input: + return rc; +} + +void msm_vidc_batch_handler(struct work_struct *work) +{ + int rc = 0; + struct msm_vidc_inst *inst; + + inst = container_of(work, struct msm_vidc_inst, batch_work.work); + inst = get_inst(get_vidc_core(MSM_VIDC_CORE_VENUS), inst); + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: invalid state\n", __func__); + goto exit; + } + + s_vpr_h(inst->sid, "%s: queue pending batch 
buffers\n", + __func__); + + rc = msm_comm_qbufs_batch(inst, NULL); + if (rc) { + s_vpr_e(inst->sid, "%s: batch qbufs failed\n", __func__); + msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_SYS_ERROR); + } + +exit: + put_inst(inst); +} + +static int msm_comm_qbuf_superframe_to_hfi(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc, i; + struct hfi_device *hdev; + struct v4l2_format *f; + struct v4l2_ctrl *ctrl; + u64 ts_delta_us; + struct vidc_frame_data *frames; + u32 num_etbs, superframe_count, frame_size, hfi_fmt; + bool skip_allowed = false; + + if (!inst || !inst->core || !inst->core->device || !mbuf) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + hdev = inst->core->device; + frames = inst->superframe_data; + + if (!is_input_buffer(mbuf)) + return msm_comm_qbuf_to_hfi(inst, mbuf); + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_SUPERFRAME); + superframe_count = ctrl->val; + if (superframe_count > VIDC_SUPERFRAME_MAX) { + s_vpr_e(inst->sid, "%s: wrong superframe count %d, max %d\n", + __func__, superframe_count, VIDC_SUPERFRAME_MAX); + return -EINVAL; + } + + ts_delta_us = 1000000 / (inst->clk_data.frame_rate >> 16); + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + hfi_fmt = msm_comm_convert_color_fmt(f->fmt.pix_mp.pixelformat, + inst->sid); + frame_size = VENUS_BUFFER_SIZE(hfi_fmt, f->fmt.pix_mp.width, + f->fmt.pix_mp.height); + if (frame_size * superframe_count != + mbuf->vvb.vb2_buf.planes[0].length) { + s_vpr_e(inst->sid, + "%s: invalid superframe length, pxlfmt %#x wxh %dx%d framesize %d count %d length %d\n", + __func__, f->fmt.pix_mp.pixelformat, + f->fmt.pix_mp.width, f->fmt.pix_mp.height, + frame_size, superframe_count, + mbuf->vvb.vb2_buf.planes[0].length); + return -EINVAL; + } + + num_etbs = 0; + populate_frame_data(&frames[0], mbuf, inst); + /* prepare superframe buffers */ + frames[0].filled_len = frame_size; + /* + * superframe logic updates extradata, cvpmetadata_skip and eos flags only, + * so ensure no 
other flags are populated in populate_frame_data() + */ + frames[0].flags &= ~HAL_BUFFERFLAG_EXTRADATA; + frames[0].flags &= ~HAL_BUFFERFLAG_EOS; + frames[0].flags &= ~HAL_BUFFERFLAG_CVPMETADATA_SKIP; + frames[0].flags &= ~HAL_BUFFERFLAG_ENDOFSUBFRAME; + if (frames[0].flags) + s_vpr_e(inst->sid, "%s: invalid flags %#x\n", + __func__, frames[0].flags); + frames[0].flags = 0; + + /* Add skip flag only if CVP metadata is enabled */ + if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_CVP) { + skip_allowed = true; + frames[0].flags |= HAL_BUFFERFLAG_CVPMETADATA_SKIP; + } + + for (i = 0; i < superframe_count; i++) { + if (i) + memcpy(&frames[i], &frames[0], + sizeof(struct vidc_frame_data)); + frames[i].offset += i * frame_size; + frames[i].timestamp += i * ts_delta_us; + if (!i) { + /* first frame */ + if (frames[0].extradata_addr) + frames[0].flags |= HAL_BUFFERFLAG_EXTRADATA; + + /* Add work incomplete flag for all etb's except the + * last one. For last frame, flag is cleared at the + * last frame iteration. + */ + frames[0].flags |= HAL_BUFFERFLAG_ENDOFSUBFRAME; + } else if (i == superframe_count - 1) { + /* last frame */ + if (mbuf->vvb.flags & V4L2_BUF_FLAG_EOS) + frames[i].flags |= HAL_BUFFERFLAG_EOS; + /* Clear Subframe flag just for the last frame to + * indicate the end of SuperFrame. 
+ */ + frames[i].flags &= ~HAL_BUFFERFLAG_ENDOFSUBFRAME; + } + num_etbs++; + } + + /* If cvp metadata is enabled and metadata is available, + * do not add skip flag for only first frame */ + if (skip_allowed && !(mbuf->vvb.flags & V4L2_BUF_FLAG_CVPMETADATA_SKIP)) + frames[0].flags &= ~HAL_BUFFERFLAG_CVPMETADATA_SKIP; + + rc = call_hfi_op(hdev, session_process_batch, inst->session, + num_etbs, frames, 0, NULL); + if (rc) { + s_vpr_e(inst->sid, "%s: Failed to qbuf: %d\n", __func__, rc); + return rc; + } + /* update mbuf flags */ + mbuf->flags |= MSM_VIDC_FLAG_QUEUED; + mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED; + msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB); + + return 0; +} + +static int msm_comm_qbuf_in_rbr(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + + if (!inst || !mbuf) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst is in bad state\n", __func__); + return -EINVAL; + } + + rc = msm_comm_scale_clocks_and_bus(inst, 0); + if (rc) + s_vpr_e(inst->sid, "%s: scale clock failed\n", __func__); + + print_vidc_buffer(VIDC_HIGH|VIDC_PERF, "qbuf in rbr", inst, mbuf); + rc = msm_comm_qbuf_to_hfi(inst, mbuf); + if (rc) + s_vpr_e(inst->sid, + "%s: Failed qbuf to hfi: %d\n", __func__, rc); + + return rc; +} + +int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + struct v4l2_ctrl *ctrl; + int do_bw_calc = 0; + + if (!inst || !mbuf) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst is in bad state\n", __func__); + return -EINVAL; + } + + if (inst->state != MSM_VIDC_START_DONE) { + mbuf->flags |= MSM_VIDC_FLAG_DEFERRED; + print_vidc_buffer(VIDC_HIGH, "qbuf deferred", inst, mbuf); + return 0; + } + + do_bw_calc = mbuf->vvb.vb2_buf.type == INPUT_MPLANE; + rc = msm_comm_scale_clocks_and_bus(inst, 
do_bw_calc); + if (rc) + s_vpr_e(inst->sid, "%s: scale clock & bw failed\n", __func__); + + print_vidc_buffer(VIDC_HIGH|VIDC_PERF, "qbuf", inst, mbuf); + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_SUPERFRAME); + if (ctrl->val) + rc = msm_comm_qbuf_superframe_to_hfi(inst, mbuf); + else + rc = msm_comm_qbuf_to_hfi(inst, mbuf); + if (rc) + s_vpr_e(inst->sid, "%s: Failed qbuf to hfi: %d\n", + __func__, rc); + + return rc; +} + +int msm_comm_qbufs(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct msm_vidc_buffer *mbuf; + bool found; + + if (!inst) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + + if (inst->state != MSM_VIDC_START_DONE) { + s_vpr_h(inst->sid, "%s: inst not in start state: %d\n", + __func__, inst->state); + return 0; + } + + do { + mutex_lock(&inst->registeredbufs.lock); + found = false; + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + /* Queue only deferred buffers */ + if (mbuf->flags & MSM_VIDC_FLAG_DEFERRED) { + found = true; + break; + } + } + mutex_unlock(&inst->registeredbufs.lock); + if (!found) { + s_vpr_h(inst->sid, + "%s: no more deferred qbufs\n", __func__); + break; + } + + /* do not call msm_comm_qbuf() under registerbufs lock */ + if (!kref_get_mbuf(inst, mbuf)) { + s_vpr_e(inst->sid, "%s: mbuf not found\n", __func__); + rc = -EINVAL; + break; + } + rc = msm_comm_qbuf(inst, mbuf); + kref_put_mbuf(mbuf); + if (rc) { + s_vpr_e(inst->sid, "%s: failed qbuf\n", __func__); + break; + } + } while (found); + + return rc; +} + +int msm_comm_qbufs_batch(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + struct msm_vidc_buffer *buf; + int do_bw_calc = 0; + + do_bw_calc = mbuf ? 
mbuf->vvb.vb2_buf.type == INPUT_MPLANE : 0; + rc = msm_comm_scale_clocks_and_bus(inst, do_bw_calc); + if (rc) + s_vpr_e(inst->sid, "%s: scale clock & bw failed\n", __func__); + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(buf, &inst->registeredbufs.list, list) { + /* Don't queue if buffer is not OUTPUT_MPLANE */ + if (buf->vvb.vb2_buf.type != OUTPUT_MPLANE) + goto loop_end; + /* Don't queue if buffer is not a deferred buffer */ + if (!(buf->flags & MSM_VIDC_FLAG_DEFERRED)) + goto loop_end; + /* Don't queue if RBR event is pending on this buffer */ + if (buf->flags & MSM_VIDC_FLAG_RBR_PENDING) + goto loop_end; + + print_vidc_buffer(VIDC_HIGH|VIDC_PERF, "batch-qbuf", inst, buf); + rc = msm_comm_qbuf_to_hfi(inst, buf); + if (rc) { + s_vpr_e(inst->sid, "%s: Failed batch qbuf to hfi: %d\n", + __func__, rc); + break; + } +loop_end: + /* Queue pending buffers till the current buffer only */ + if (buf == mbuf) + break; + } + mutex_unlock(&inst->registeredbufs.lock); + + return rc; +} + +/* + * msm_comm_qbuf_decode_batch - count the buffers which are not queued to + * firmware yet (count includes rbr pending buffers too) and + * queue the buffers at once if full batch count reached. + * Don't queue rbr pending buffers as they would be queued + * when rbr event arrived from firmware. 
+ */ +int msm_comm_qbuf_decode_batch(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + u32 count = 0; + + if (!inst || !inst->core || !mbuf) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + + if (inst->state == MSM_VIDC_CORE_INVALID) { + s_vpr_e(inst->sid, "%s: inst is in bad state\n", __func__); + return -EINVAL; + } + + if (inst->state != MSM_VIDC_START_DONE) { + mbuf->flags |= MSM_VIDC_FLAG_DEFERRED; + print_vidc_buffer(VIDC_HIGH|VIDC_PERF, + "qbuf deferred", inst, mbuf); + return 0; + } + + /* + * Don't defer buffers initially to avoid startup latency increase + * due to batching + */ + if (inst->clk_data.buffer_counter > SKIP_BATCH_WINDOW) { + count = num_pending_qbufs(inst, OUTPUT_MPLANE); + if (count < inst->batch.size) { + print_vidc_buffer(VIDC_HIGH, + "batch-qbuf deferred", inst, mbuf); + schedule_batch_work(inst); + return 0; + } + + /* + * Batch completed - queing bufs to firmware. + * so cancel pending work if any. + */ + cancel_batch_work(inst); + } + + rc = msm_comm_qbufs_batch(inst, mbuf); + if (rc) + s_vpr_e(inst->sid, + "%s: Failed qbuf to hfi: %d\n", + __func__, rc); + + return rc; +} + +int schedule_batch_work(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core; + struct msm_vidc_platform_resources *res; + + if (!inst || !inst->core) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + core = inst->core; + res = &core->resources; + + cancel_delayed_work(&inst->batch_work); + queue_delayed_work(core->vidc_core_workq, &inst->batch_work, + msecs_to_jiffies(res->batch_timeout)); + + return 0; +} + +int cancel_batch_work(struct msm_vidc_inst *inst) +{ + if (!inst) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + cancel_delayed_work(&inst->batch_work); + + return 0; +} + +int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst) +{ + int rc = -EINVAL, i = 0; + union hal_get_property hprop; + + memset(&hprop, 0x0, sizeof(hprop)); + /* + * First 
check if we can calculate bufffer sizes. + * If we can calculate then we do it within the driver. + * If we cannot then we get buffer requirements from firmware. + */ + if (inst->buffer_size_calculators) { + rc = inst->buffer_size_calculators(inst); + if (rc) + s_vpr_e(inst->sid, + "Failed calculating internal buffer sizes: %d", + rc); + } + + /* + * Fallback to get buffreq from firmware if internal calculation + * is not done or if it fails + */ + if (rc) { + rc = msm_comm_try_get_buff_req(inst, &hprop); + if (rc) { + s_vpr_e(inst->sid, + "Failed getting buffer requirements: %d", rc); + return rc; + } + + /* reset internal buffers */ + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements *req; + + req = &inst->buff_req.buffer[i]; + if (is_internal_buffer(req->buffer_type)) + msm_comm_reset_bufreqs(inst, req->buffer_type); + } + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements req; + struct hal_buffer_requirements *curr_req; + + req = hprop.buf_req.buffer[i]; + /* + * Firmware buffer requirements are needed for internal + * buffers only and all other buffer requirements are + * calculated in driver. 
+ */ + curr_req = get_buff_req_buffer(inst, req.buffer_type); + if (!curr_req) + return -EINVAL; + + if (is_internal_buffer(req.buffer_type)) { + memcpy(curr_req, &req, + sizeof(struct hal_buffer_requirements)); + } + } + } + + s_vpr_h(inst->sid, "Buffer requirements :\n"); + s_vpr_h(inst->sid, "%15s %8s %8s %8s %8s %8s\n", + "buffer type", "count", "mincount_host", "mincount_fw", "size", + "alignment"); + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements req = inst->buff_req.buffer[i]; + + if (req.buffer_type != HAL_BUFFER_NONE && + req.buffer_type != HAL_BUFFER_INPUT && + req.buffer_type != HAL_BUFFER_OUTPUT && + req.buffer_type != HAL_BUFFER_OUTPUT2) { + s_vpr_h(inst->sid, "%15s %8d %8d %8d %8d %8d\n", + get_buffer_name(req.buffer_type), + req.buffer_count_actual, + req.buffer_count_min_host, + req.buffer_count_min, req.buffer_size, + req.buffer_alignment); + } + } + return rc; +} + +int msm_comm_try_get_buff_req(struct msm_vidc_inst *inst, + union hal_get_property *hprop) +{ + int rc = 0; + struct hfi_device *hdev; + struct getprop_buf *buf; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + hdev = inst->core->device; + mutex_lock(&inst->sync_lock); + if (inst->state < MSM_VIDC_OPEN_DONE || + inst->state >= MSM_VIDC_CLOSE) { + + /* No need to check inst->state == MSM_VIDC_INVALID since + * INVALID is > CLOSE_DONE. When core went to INVALID state, + * we put all the active instances in INVALID. So > CLOSE_DONE + * is enough check to have. 
+ */ + + s_vpr_e(inst->sid, + "In Wrong state to call Buf Req: Inst %pK or Core %pK\n", + inst, inst->core); + rc = -EAGAIN; + mutex_unlock(&inst->sync_lock); + goto exit; + } + mutex_unlock(&inst->sync_lock); + + rc = call_hfi_op(hdev, session_get_buf_req, inst->session); + if (rc) { + s_vpr_e(inst->sid, "Can't query hardware for property: %d\n", + rc); + goto exit; + } + + rc = wait_for_completion_timeout(&inst->completions[ + SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO)], + msecs_to_jiffies( + inst->core->resources.msm_vidc_hw_rsp_timeout)); + if (!rc) { + s_vpr_e(inst->sid, + "%s: Wait interrupted or timed out [%pK]: %d\n", + __func__, inst, + SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO)); + msm_comm_kill_session(inst); + rc = -ETIMEDOUT; + goto exit; + } else { + /* wait_for_completion_timeout returns jiffies before expiry */ + rc = 0; + } + + mutex_lock(&inst->pending_getpropq.lock); + if (!list_empty(&inst->pending_getpropq.list)) { + buf = list_first_entry(&inst->pending_getpropq.list, + struct getprop_buf, list); + *hprop = *(union hal_get_property *)buf->data; + kfree(buf->data); + list_del(&buf->list); + kfree(buf); + } else { + s_vpr_e(inst->sid, "%s: getprop list empty\n", __func__); + rc = -EINVAL; + } + mutex_unlock(&inst->pending_getpropq.lock); +exit: + return rc; +} + +int msm_comm_release_dpb_only_buffers(struct msm_vidc_inst *inst, + bool force_release) +{ + struct msm_smem *handle; + struct internal_buf *buf, *dummy; + struct vidc_buffer_addr_info buffer_info; + int rc = 0; + struct msm_vidc_core *core; + struct hfi_device *hdev; + + if (!inst) { + d_vpr_e("Invalid instance pointer = %pK\n", inst); + return -EINVAL; + } + mutex_lock(&inst->outputbufs.lock); + if (list_empty(&inst->outputbufs.list)) { + s_vpr_h(inst->sid, "%s: No OUTPUT buffers allocated\n", + __func__); + mutex_unlock(&inst->outputbufs.lock); + return 0; + } + mutex_unlock(&inst->outputbufs.lock); + + core = inst->core; + if (!core) { + s_vpr_e(inst->sid, "Invalid core 
pointer\n"); + return -EINVAL; + } + hdev = core->device; + if (!hdev) { + s_vpr_e(inst->sid, "Invalid device pointer\n"); + return -EINVAL; + } + mutex_lock(&inst->outputbufs.lock); + list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) { + handle = &buf->smem; + + if ((buf->buffer_ownership == FIRMWARE) && !force_release) { + s_vpr_h(inst->sid, "DPB is with f/w. Can't free it\n"); + /* + * mark this buffer to avoid sending it to video h/w + * again, this buffer belongs to old resolution and + * it will be removed when video h/w returns it. + */ + buf->mark_remove = true; + continue; + } + + buffer_info.buffer_size = handle->size; + buffer_info.buffer_type = buf->buffer_type; + buffer_info.num_buffers = 1; + buffer_info.align_device_addr = handle->device_addr; + if (inst->buffer_mode_set[OUTPUT_PORT] == + HAL_BUFFER_MODE_STATIC) { + buffer_info.response_required = false; + rc = call_hfi_op(hdev, session_release_buffers, + (void *)inst->session, &buffer_info); + if (rc) { + s_vpr_e(inst->sid, + "Rel output buf fail:%x, %d\n", + buffer_info.align_device_addr, + buffer_info.buffer_size); + } + } + + list_del(&buf->list); + msm_comm_smem_free(inst, &buf->smem); + kfree(buf); + } + + if (inst->dpb_extra_binfo) { + msm_comm_smem_free(inst, &inst->dpb_extra_binfo->smem); + kfree(inst->dpb_extra_binfo); + inst->dpb_extra_binfo = NULL; + } + + mutex_unlock(&inst->outputbufs.lock); + return rc; +} + +static enum hal_buffer scratch_buf_sufficient(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type) +{ + struct hal_buffer_requirements *bufreq = NULL; + struct internal_buf *buf; + int count = 0; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + goto not_sufficient; + } + + bufreq = get_buff_req_buffer(inst, buffer_type); + if (!bufreq) + goto not_sufficient; + + /* Check if current scratch buffers are sufficient */ + mutex_lock(&inst->scratchbufs.lock); + + list_for_each_entry(buf, &inst->scratchbufs.list, list) { + if (buf->buffer_type == 
buffer_type && + bufreq->buffer_size && + buf->smem.size >= bufreq->buffer_size) + count++; + } + mutex_unlock(&inst->scratchbufs.lock); + + if (count != bufreq->buffer_count_actual) + goto not_sufficient; + + s_vpr_h(inst->sid, + "Existing scratch buffer is sufficient for buffer type %#x\n", + buffer_type); + + return buffer_type; + +not_sufficient: + return HAL_BUFFER_NONE; +} + +int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst, + bool check_for_reuse) +{ + struct msm_smem *handle; + struct internal_buf *buf, *dummy; + struct vidc_buffer_addr_info buffer_info; + int rc = 0; + struct msm_vidc_core *core; + struct hfi_device *hdev; + enum hal_buffer sufficiency = HAL_BUFFER_NONE; + + if (!inst) { + d_vpr_e("Invalid instance pointer = %pK\n", inst); + return -EINVAL; + } + core = inst->core; + if (!core) { + s_vpr_e(inst->sid, "Invalid core pointer = %pK\n", core); + return -EINVAL; + } + hdev = core->device; + if (!hdev) { + s_vpr_e(inst->sid, "Invalid device pointer = %pK\n", hdev); + return -EINVAL; + } + + if (check_for_reuse) { + sufficiency |= scratch_buf_sufficient(inst, + HAL_BUFFER_INTERNAL_SCRATCH); + + sufficiency |= scratch_buf_sufficient(inst, + HAL_BUFFER_INTERNAL_SCRATCH_1); + + sufficiency |= scratch_buf_sufficient(inst, + HAL_BUFFER_INTERNAL_SCRATCH_2); + } + + mutex_lock(&inst->scratchbufs.lock); + list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) { + handle = &buf->smem; + buffer_info.buffer_size = handle->size; + buffer_info.buffer_type = buf->buffer_type; + buffer_info.num_buffers = 1; + buffer_info.align_device_addr = handle->device_addr; + buffer_info.response_required = true; + rc = call_hfi_op(hdev, session_release_buffers, + (void *)inst->session, &buffer_info); + if (!rc) { + mutex_unlock(&inst->scratchbufs.lock); + rc = wait_for_sess_signal_receipt(inst, + HAL_SESSION_RELEASE_BUFFER_DONE); + if (rc) + s_vpr_e(inst->sid, + "%s: wait for signal failed, rc %d\n", + __func__, rc); + 
mutex_lock(&inst->scratchbufs.lock); + } else { + s_vpr_e(inst->sid, "Rel scrtch buf fail:%x, %d\n", + buffer_info.align_device_addr, + buffer_info.buffer_size); + } + + /*If scratch buffers can be reused, do not free the buffers*/ + if (sufficiency & buf->buffer_type) + continue; + + list_del(&buf->list); + msm_comm_smem_free(inst, handle); + kfree(buf); + } + + mutex_unlock(&inst->scratchbufs.lock); + return rc; +} + +void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst) +{ + struct eos_buf *buf, *next; + + if (!inst) { + d_vpr_e("Invalid instance pointer = %pK\n", inst); + return; + } + + mutex_lock(&inst->eosbufs.lock); + list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) { + list_del(&buf->list); + msm_comm_smem_free(inst, &buf->smem); + kfree(buf); + } + INIT_LIST_HEAD(&inst->eosbufs.list); + mutex_unlock(&inst->eosbufs.lock); +} + + +int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst) +{ + struct recon_buf *buf, *next; + + if (!inst) { + d_vpr_e("Invalid instance pointer = %pK\n", inst); + return -EINVAL; + } + + mutex_lock(&inst->refbufs.lock); + list_for_each_entry_safe(buf, next, &inst->refbufs.list, list) { + list_del(&buf->list); + kfree(buf); + } + INIT_LIST_HEAD(&inst->refbufs.list); + mutex_unlock(&inst->refbufs.lock); + + return 0; +} + +int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst) +{ + struct msm_smem *handle; + struct list_head *ptr, *next; + struct internal_buf *buf; + struct vidc_buffer_addr_info buffer_info; + int rc = 0; + struct msm_vidc_core *core; + struct hfi_device *hdev; + + if (!inst) { + d_vpr_e("Invalid instance pointer = %pK\n", inst); + return -EINVAL; + } + core = inst->core; + if (!core) { + s_vpr_e(inst->sid, "Invalid core pointer = %pK\n", core); + return -EINVAL; + } + hdev = core->device; + if (!hdev) { + s_vpr_e(inst->sid, "Invalid device pointer = %pK\n", hdev); + return -EINVAL; + } + + mutex_lock(&inst->persistbufs.lock); + list_for_each_safe(ptr, next, 
&inst->persistbufs.list) { + buf = list_entry(ptr, struct internal_buf, list); + handle = &buf->smem; + buffer_info.buffer_size = handle->size; + buffer_info.buffer_type = buf->buffer_type; + buffer_info.num_buffers = 1; + buffer_info.align_device_addr = handle->device_addr; + buffer_info.response_required = true; + rc = call_hfi_op(hdev, session_release_buffers, + (void *)inst->session, &buffer_info); + if (!rc) { + mutex_unlock(&inst->persistbufs.lock); + rc = wait_for_sess_signal_receipt(inst, + HAL_SESSION_RELEASE_BUFFER_DONE); + if (rc) + s_vpr_e(inst->sid, + "%s: wait for signal failed, rc %d\n", + __func__, rc); + mutex_lock(&inst->persistbufs.lock); + } else { + s_vpr_e(inst->sid, "Rel prst buf fail:%x, %d\n", + buffer_info.align_device_addr, + buffer_info.buffer_size); + } + list_del(&buf->list); + msm_comm_smem_free(inst, handle); + kfree(buf); + } + mutex_unlock(&inst->persistbufs.lock); + return rc; +} + +int msm_comm_set_buffer_count(struct msm_vidc_inst *inst, + int min_count, int act_count, enum hal_buffer type) +{ + int rc = 0; + struct v4l2_ctrl *ctrl; + struct hfi_device *hdev; + struct hfi_buffer_count_actual buf_count; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + hdev = inst->core->device; + + buf_count.buffer_type = get_hfi_buffer(type, inst->sid); + buf_count.buffer_count_actual = act_count; + buf_count.buffer_count_min_host = min_count; + /* set total superframe buffers count */ + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_SUPERFRAME); + if (ctrl->val) + buf_count.buffer_count_actual = act_count * ctrl->val; + s_vpr_h(inst->sid, + "%s: hal_buffer %d min %d actual %d superframe %d\n", + __func__, type, min_count, + buf_count.buffer_count_actual, ctrl->val); + rc = call_hfi_op(hdev, session_set_property, + inst->session, HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL, + &buf_count, sizeof(buf_count)); + if (rc) + s_vpr_e(inst->sid, + "Failed to set actual buffer count %d 
for buffer type %d\n", + buf_count.buffer_count_actual, type); + return rc; +} + +int msm_comm_set_dpb_only_buffers(struct msm_vidc_inst *inst) +{ + int rc = 0; + bool force_release = true; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + if (get_v4l2_codec(inst) == V4L2_PIX_FMT_VP9) + force_release = false; + + if (msm_comm_release_dpb_only_buffers(inst, force_release)) + s_vpr_e(inst->sid, "Failed to release output buffers\n"); + + rc = set_dpb_only_buffers(inst, HAL_BUFFER_OUTPUT); + if (rc) + goto error; + return rc; +error: + msm_comm_release_dpb_only_buffers(inst, true); + return rc; +} + +int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst) +{ + int rc = 0; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + if (msm_comm_release_scratch_buffers(inst, true)) + s_vpr_e(inst->sid, "Failed to release scratch buffers\n"); + + rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH, + &inst->scratchbufs); + if (rc) + goto error; + + rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_1, + &inst->scratchbufs); + if (rc) + goto error; + + rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_2, + &inst->scratchbufs); + if (rc) + goto error; + + return rc; +error: + msm_comm_release_scratch_buffers(inst, false); + return rc; +} + +int msm_comm_set_recon_buffers(struct msm_vidc_inst *inst) +{ + int rc = 0; + unsigned int i = 0, bufcount = 0; + struct recon_buf *binfo; + struct msm_vidc_list *buf_list = &inst->refbufs; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + bufcount = inst->fmts[OUTPUT_PORT].count_actual; + + msm_comm_release_recon_buffers(inst); + + for (i = 0; i < bufcount; i++) { + binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); + if (!binfo) { + s_vpr_e(inst->sid, "%s: Out of memory\n", __func__); + rc = -ENOMEM; + goto fail_kzalloc; 
		}

		binfo->buffer_index = i;
		mutex_lock(&buf_list->lock);
		list_add_tail(&binfo->list, &buf_list->list);
		mutex_unlock(&buf_list->lock);
	}

fail_kzalloc:
	return rc;
}

/*
 * Allocate and register both internal persist buffer types with
 * firmware; on failure release whatever was already set.
 */
int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst || !inst->core || !inst->core->device) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST,
		&inst->persistbufs);
	if (rc)
		goto error;

	rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST_1,
		&inst->persistbufs);
	if (rc)
		goto error;
	return rc;
error:
	msm_comm_release_persist_buffers(inst);
	return rc;
}

/*
 * Flush path used when the core/session is in an invalid state: return
 * every queued vb2 buffer on both ports to userspace with bytesused=0,
 * then signal FLUSH_DONE. Firmware is not consulted beyond moving the
 * session to RELEASE_RESOURCES_DONE first.
 */
static void msm_comm_flush_in_invalid_state(struct msm_vidc_inst *inst)
{
	struct list_head *ptr, *next;
	enum vidc_ports ports[] = {INPUT_PORT, OUTPUT_PORT};
	int c = 0;

	/* before flush ensure venus released all buffers */
	msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);

	for (c = 0; c < ARRAY_SIZE(ports); ++c) {
		enum vidc_ports port = ports[c];

		mutex_lock(&inst->bufq[port].lock);
		list_for_each_safe(ptr, next,
				&inst->bufq[port].vb2_bufq.queued_list) {
			struct vb2_buffer *vb = container_of(ptr,
					struct vb2_buffer, queued_entry);
			if (vb->state == VB2_BUF_STATE_ACTIVE) {
				/* nothing was consumed; report zero bytes */
				vb->planes[0].bytesused = 0;
				print_vb2_buffer("flush in invalid", inst, vb);
				vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			} else {
				s_vpr_e(inst->sid,
					"%s: VB is in state %d not in ACTIVE state\n",
					__func__, vb->state);
			}
		}
		mutex_unlock(&inst->bufq[port].lock);
	}
	msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_FLUSH_DONE);
}

/*
 * Main flush entry point. @flags selects input (V4L2_CMD_FLUSH_OUTPUT)
 * and/or output (V4L2_CMD_FLUSH_CAPTURE) flushing. Deferred/RBR-pending
 * registered buffers that the client has queued are returned locally;
 * the rest are flushed by sending a session_flush to firmware.
 */
int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
{
	unsigned int i = 0;
	int rc = 0;
	bool ip_flush = false;
	bool op_flush = false;
	struct msm_vidc_buffer *mbuf, *next;
	struct msm_vidc_core *core;
	struct hfi_device *hdev;

	if (!inst || !inst->core || !inst->core->device) {
		d_vpr_e("invalid params %pK\n", inst);
		return -EINVAL;
	}

	if (inst->state < MSM_VIDC_OPEN_DONE) {
		s_vpr_e(inst->sid,
			"Invalid state to call flush, inst %pK, state %#x\n",
			inst, inst->state);
		return -EINVAL;
	}

	core = inst->core;
	hdev = core->device;

	ip_flush = !!(flags & V4L2_CMD_FLUSH_OUTPUT);
	op_flush = !!(flags & V4L2_CMD_FLUSH_CAPTURE);
	if (ip_flush && !op_flush) {
		/* NOTE(review): despite the "making it flush all" message,
		 * this branch jumps straight to exit and performs no flush -
		 * confirm whether the goto is intentional.
		 */
		s_vpr_e(inst->sid,
			"Input only flush not supported, making it flush all\n");
		op_flush = true;
		goto exit;
	}

	if ((inst->in_flush && ip_flush) || (inst->out_flush && op_flush)) {
		s_vpr_e(inst->sid, "%s: Already in flush\n", __func__);
		goto exit;
	}

	msm_clock_data_reset(inst);

	cancel_batch_work(inst);
	if (inst->state == MSM_VIDC_CORE_INVALID) {
		s_vpr_e(inst->sid, "Core %pK and inst %pK are in bad state\n",
				core, inst);
		msm_comm_flush_in_invalid_state(inst);
		goto exit;
	}

	if (ip_flush)
		mutex_lock(&inst->bufq[INPUT_PORT].lock);
	if (op_flush)
		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
	/* enable in flush */
	inst->in_flush = ip_flush;
	inst->out_flush = op_flush;

	mutex_lock(&inst->registeredbufs.lock);
	list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
		/* don't flush input buffers if input flush is not requested */
		if (!ip_flush && mbuf->vvb.vb2_buf.type == INPUT_MPLANE)
			continue;

		/* flush only deferred or rbr pending buffers */
		if (!(mbuf->flags & MSM_VIDC_FLAG_DEFERRED ||
			mbuf->flags & MSM_VIDC_FLAG_RBR_PENDING))
			continue;

		/*
		 * flush buffers which are queued by client already,
		 * the refcount will be two or more for those buffers.
		 */
		if (!(mbuf->smem[0].refcount >= 2))
			continue;

		print_vidc_buffer(VIDC_HIGH, "flush buf", inst, mbuf);
		msm_comm_flush_vidc_buffer(inst, mbuf);

		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
			/* unmapped twice on purpose: drops both the driver
			 * and the queued reference on each plane
			 */
			if (inst->smem_ops->smem_unmap_dma_buf(inst,
				&mbuf->smem[i]))
				print_vidc_buffer(VIDC_ERR,
					"dqbuf: unmap failed.", inst, mbuf);
			if (inst->smem_ops->smem_unmap_dma_buf(inst,
				&mbuf->smem[i]))
				print_vidc_buffer(VIDC_ERR,
					"dqbuf: unmap failed..", inst, mbuf);
		}
		if (!mbuf->smem[0].refcount) {
			list_del(&mbuf->list);
			kref_put_mbuf(mbuf);
		} else {
			/* buffer is no more a deferred buffer */
			mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED;
		}
	}
	mutex_unlock(&inst->registeredbufs.lock);

	hdev = inst->core->device;
	if (ip_flush) {
		s_vpr_h(inst->sid, "Send flush on all ports to firmware\n");
		rc = call_hfi_op(hdev, session_flush, inst->session,
			HAL_FLUSH_ALL);
	} else {
		s_vpr_h(inst->sid, "Send flush on output port to firmware\n");
		rc = call_hfi_op(hdev, session_flush, inst->session,
			HAL_FLUSH_OUTPUT);
	}
	/* unlock in reverse order of acquisition */
	if (op_flush)
		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
	if (ip_flush)
		mutex_unlock(&inst->bufq[INPUT_PORT].lock);
	if (rc) {
		s_vpr_e(inst->sid,
			"Sending flush to firmware failed, flush out all buffers\n");
		msm_comm_flush_in_invalid_state(inst);
		/* disable in_flush & out_flush */
		inst->in_flush = false;
		inst->out_flush = false;
		goto exit;
	}
	/*
	 * Set inst->flush_timestamps to true when flush is issued
	 * Use this variable to clear timestamps list, everytime
	 * flush is issued, before adding the next buffer's timestamp
	 * to the list.
	 */
	if (!is_image_session(inst) && inst->in_flush) {
		inst->flush_timestamps = true;
		s_vpr_h(inst->sid,
			"Setting flush variable to clear timestamp list: %d\n",
			inst->flush_timestamps);
	}
exit:
	return rc;
}

/*
 * Dump NOC error registers after an SMMU fault, but only when non-fatal
 * pagefault handling is enabled and a fault was actually handled.
 */
int msm_vidc_noc_error_info(struct msm_vidc_core *core)
{
	struct hfi_device *hdev;

	if (!core || !core->device) {
		d_vpr_e("%s: Invalid parameters: %pK\n",
			__func__, core);
		return -EINVAL;
	}

	if (!core->resources.non_fatal_pagefaults)
		return 0;

	if (!core->smmu_fault_handled)
		return 0;

	hdev = core->device;
	call_hfi_op(hdev, noc_error_info, hdev->hfi_device_data);

	return 0;
}

/*
 * Decode a user-supplied 64-bit SSR trigger value into core->ssr and
 * schedule the SSR work item. Field layout is documented below.
 */
int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
	u64 trigger_ssr_val)
{
	struct msm_vidc_ssr *ssr;

	if (!core) {
		d_vpr_e("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}
	ssr = &core->ssr;
	/*
	 * ssr_type: 0-3 bits
	 * sub_client_id: 4-7 bits
	 * reserved: 8-31 bits
	 * test_addr: 32-63 bits
	 */
	ssr->ssr_type = (trigger_ssr_val &
			(unsigned long)SSR_TYPE) >> SSR_TYPE_SHIFT;
	ssr->sub_client_id = (trigger_ssr_val &
			(unsigned long)SSR_SUB_CLIENT_ID) >> SSR_SUB_CLIENT_ID_SHIFT;
	ssr->test_addr = (trigger_ssr_val &
			(unsigned long)SSR_ADDR_ID) >> SSR_ADDR_SHIFT;
	schedule_work(&core->ssr_work);
	return 0;
}

/*
 * Workqueue handler for user-triggered SSR: forwards the decoded SSR
 * request to firmware while holding the core lock.
 */
void msm_vidc_ssr_handler(struct work_struct *work)
{
	int rc;
	struct msm_vidc_core *core;
	struct hfi_device *hdev;
	struct msm_vidc_ssr *ssr;

	core = container_of(work, struct msm_vidc_core, ssr_work);
	if (!core || !core->device) {
		d_vpr_e("%s: invalid params %pK\n", __func__, core);
		return;
	}
	hdev = core->device;
	ssr = &core->ssr;

	mutex_lock(&core->lock);
	if (core->state == VIDC_CORE_INIT_DONE) {
		d_vpr_e("%s: ssr type %d\n", __func__, ssr->ssr_type);
		/*
		 * In current implementation user-initiated SSR triggers
		 * a fatal error from hardware. However, there is no way
		 * to know if fatal error is due to SSR or not. Handle
		 * user SSR as non-fatal.
+ */ + core->trigger_ssr = true; + rc = call_hfi_op(hdev, core_trigger_ssr, + hdev->hfi_device_data, ssr->ssr_type, + ssr->sub_client_id, ssr->test_addr); + if (rc) { + d_vpr_e("%s: trigger_ssr failed\n", __func__); + core->trigger_ssr = false; + } + } else { + d_vpr_e("%s: video core not initialized\n", __func__); + } + mutex_unlock(&core->lock); +} + +static int msm_vidc_check_mbpf_supported(struct msm_vidc_inst *inst) +{ + u32 mbpf = 0; + struct msm_vidc_core *core; + struct msm_vidc_inst *temp; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + core = inst->core; + + if (!core->resources.max_mbpf) { + s_vpr_h(inst->sid, "%s: max mbpf not available\n", + __func__); + return 0; + } + + mutex_lock(&core->lock); + list_for_each_entry(temp, &core->instances, list) { + /* ignore invalid and completed session */ + if (temp->state == MSM_VIDC_CORE_INVALID || + temp->state >= MSM_VIDC_STOP_DONE) + continue; + /* ignore thumbnail session */ + if (is_thumbnail_session(temp)) + continue; + /* ignore HEIF sessions */ + if (is_image_session(temp) || is_grid_session(temp)) + continue; + mbpf += NUM_MBS_PER_FRAME( + temp->fmts[INPUT_PORT].v4l2_fmt.fmt.pix_mp.height, + temp->fmts[INPUT_PORT].v4l2_fmt.fmt.pix_mp.width); + } + mutex_unlock(&core->lock); + + if (mbpf > core->resources.max_mbpf) { + msm_vidc_print_running_insts(inst->core); + return -ENOMEM; + } + + return 0; +} + +static u32 msm_comm_get_memory_limit(struct msm_vidc_core *core) +{ + struct memory_limit_table *memory_limits_tbl; + u32 memory_limits_tbl_size = 0; + u32 memory_limit = 0, memory_size = 0; + u32 memory_limit_mbytes = 0; + int i = 0; + + memory_limits_tbl = core->resources.mem_limit_tbl; + memory_limits_tbl_size = core->resources.memory_limit_table_size; + memory_limit_mbytes = ((u64)totalram_pages * PAGE_SIZE) >> 20; + for (i = memory_limits_tbl_size - 1; i >= 0; i--) { + memory_size = memory_limits_tbl[i].ddr_size; + memory_limit = 
memory_limits_tbl[i].mem_limit; + if (memory_size >= memory_limit_mbytes) + break; + } + + return memory_limit; +} + +int msm_comm_check_memory_supported(struct msm_vidc_inst *vidc_inst) +{ + struct msm_vidc_core *core; + struct msm_vidc_inst *inst; + struct msm_vidc_format *fmt; + struct v4l2_format *f; + struct hal_buffer_requirements *req; + struct context_bank_info *cb = NULL; + u32 i, dpb_cnt = 0, dpb_size = 0, input_size = 1, rc = 0; + u32 inst_mem_size, non_sec_cb_size = 0; + u64 total_mem_size = 0, non_sec_mem_size = 0; + u32 memory_limit_mbytes; + + core = vidc_inst->core; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + inst_mem_size = 0; + input_size = 1; + /* input port buffers memory size */ + fmt = &inst->fmts[INPUT_PORT]; + f = &fmt->v4l2_fmt; + if (is_decode_session(inst)) + input_size = msm_vidc_calculate_dec_input_frame_size(inst, 0); + else + input_size = f->fmt.pix_mp.plane_fmt[0].sizeimage; + inst_mem_size += input_size * fmt->count_min_host; + + for (i = 1; i < f->fmt.pix_mp.num_planes; i++) + inst_mem_size += f->fmt.pix_mp.plane_fmt[i].sizeimage * + fmt->count_min_host; + + /* output port buffers memory size */ + fmt = &inst->fmts[OUTPUT_PORT]; + f = &fmt->v4l2_fmt; + for (i = 0; i < f->fmt.pix_mp.num_planes; i++) + inst_mem_size += f->fmt.pix_mp.plane_fmt[i].sizeimage * + fmt->count_min_host; + + /* dpb buffers memory size */ + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_SECONDARY) { + struct hal_buffer_requirements dpb = {0}; + + rc = msm_comm_get_dpb_bufreqs(inst, &dpb); + if (rc) { + s_vpr_e(inst->sid, + "Couldn't retrieve dpb count & size\n"); + mutex_unlock(&core->lock); + return rc; + } + dpb_cnt = dpb.buffer_count_actual; + dpb_size = dpb.buffer_size; + inst_mem_size += dpb_cnt * dpb_size; + } + + /* internal buffers memory size */ + for (i = 0; i < HAL_BUFFER_MAX; i++) { + req = &inst->buff_req.buffer[i]; + if (is_internal_buffer(req->buffer_type)) + inst_mem_size += 
req->buffer_size * + req->buffer_count_actual; + } + + if (!is_secure_session(inst)) + non_sec_mem_size += inst_mem_size; + total_mem_size += inst_mem_size; + } + mutex_unlock(&core->lock); + + memory_limit_mbytes = msm_comm_get_memory_limit(core); + + if ((total_mem_size >> 20) > memory_limit_mbytes) { + s_vpr_e(vidc_inst->sid, + "%s: video mem overshoot - reached %llu MB, max_limit %llu MB\n", + __func__, total_mem_size >> 20, memory_limit_mbytes); + msm_comm_print_mem_usage(core); + return -EBUSY; + } + + if (!is_secure_session(vidc_inst)) { + mutex_lock(&core->resources.cb_lock); + list_for_each_entry(cb, &core->resources.context_banks, list) + if (!cb->is_secure) + non_sec_cb_size = cb->addr_range.size; + mutex_unlock(&core->resources.cb_lock); + + if (non_sec_mem_size > non_sec_cb_size) { + s_vpr_e(vidc_inst->sid, + "%s: insufficient device addr space, required %llu, available %llu\n", + __func__, non_sec_mem_size, non_sec_cb_size); + msm_comm_print_mem_usage(core); + return -EINVAL; + } + } + + return 0; +} + +static int msm_vidc_check_mbps_supported(struct msm_vidc_inst *inst) +{ + int max_video_load = 0, max_image_load = 0; + int video_load = 0, image_load = 0; + enum load_calc_quirks quirks = LOAD_ADMISSION_CONTROL; + + if (inst->state == MSM_VIDC_OPEN_DONE) { + image_load = msm_comm_get_device_load(inst->core, + MSM_VIDC_ENCODER, MSM_VIDC_IMAGE, + quirks); + video_load = msm_comm_get_device_load(inst->core, + MSM_VIDC_DECODER, MSM_VIDC_VIDEO, + quirks); + video_load += msm_comm_get_device_load(inst->core, + MSM_VIDC_ENCODER, MSM_VIDC_VIDEO, + quirks); + + max_video_load = inst->core->resources.max_load; + max_image_load = inst->core->resources.max_image_load; + + if (video_load > max_video_load) { + s_vpr_e(inst->sid, + "H/W is overloaded. 
needed: %d max: %d\n", + video_load, max_video_load); + msm_vidc_print_running_insts(inst->core); + return -ENOMEM; + } + + if (video_load + image_load > max_video_load + max_image_load) { + s_vpr_e(inst->sid, + "H/W is overloaded. needed: [video + image][%d + %d], max: [video + image][%d + %d]\n", + video_load, image_load, + max_video_load, max_image_load); + msm_vidc_print_running_insts(inst->core); + return -ENOMEM; + } + } + return 0; +} + +int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst) +{ + u32 x_min, x_max, y_min, y_max; + u32 input_height, input_width, output_height, output_width; + struct v4l2_format *f; + + if (is_grid_session(inst) || is_decode_session(inst)) { + s_vpr_h(inst->sid, "Skip scaling check\n"); + return 0; + } + + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + input_height = f->fmt.pix_mp.height; + input_width = f->fmt.pix_mp.width; + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + output_height = f->fmt.pix_mp.height; + output_width = f->fmt.pix_mp.width; + + if (!input_height || !input_width || !output_height || !output_width) { + s_vpr_e(inst->sid, "Invalid : Input height = %d width = %d", + input_height, input_width); + s_vpr_e(inst->sid, " output height = %d width = %d\n", + output_height, output_width); + return -ENOTSUPP; + } + + if (!inst->capability.cap[CAP_SCALE_X].min || + !inst->capability.cap[CAP_SCALE_X].max || + !inst->capability.cap[CAP_SCALE_Y].min || + !inst->capability.cap[CAP_SCALE_Y].max) { + + if (input_width * input_height != + output_width * output_height) { + s_vpr_e(inst->sid, + "%s: scaling is not supported (%dx%d != %dx%d)\n", + __func__, input_width, input_height, + output_width, output_height); + return -ENOTSUPP; + } + + s_vpr_h(inst->sid, "%s: supported WxH = %dx%d\n", + __func__, input_width, input_height); + return 0; + } + + x_min = (1<<16)/inst->capability.cap[CAP_SCALE_X].min; + y_min = (1<<16)/inst->capability.cap[CAP_SCALE_Y].min; + x_max = inst->capability.cap[CAP_SCALE_X].max >> 16; + y_max = 
inst->capability.cap[CAP_SCALE_Y].max >> 16; + + if (input_height > output_height) { + if (input_height > x_min * output_height) { + s_vpr_e(inst->sid, + "Unsupported height min height %d vs %d\n", + input_height / x_min, output_height); + return -ENOTSUPP; + } + } else { + if (output_height > x_max * input_height) { + s_vpr_e(inst->sid, + "Unsupported height max height %d vs %d\n", + x_max * input_height, output_height); + return -ENOTSUPP; + } + } + if (input_width > output_width) { + if (input_width > y_min * output_width) { + s_vpr_e(inst->sid, + "Unsupported width min width %d vs %d\n", + input_width / y_min, output_width); + return -ENOTSUPP; + } + } else { + if (output_width > y_max * input_width) { + s_vpr_e(inst->sid, + "Unsupported width max width %d vs %d\n", + y_max * input_width, output_width); + return -ENOTSUPP; + } + } + return 0; +} + +int msm_vidc_check_session_supported(struct msm_vidc_inst *inst) +{ + struct msm_vidc_capability *capability; + int rc = 0; + struct hfi_device *hdev; + struct msm_vidc_core *core; + u32 output_height, output_width, input_height, input_width; + u32 width_min, width_max, height_min, height_max; + u32 mbpf_max; + struct v4l2_format *f; + u32 sid; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: Invalid parameter\n", __func__); + return -EINVAL; + } + capability = &inst->capability; + hdev = inst->core->device; + core = inst->core; + sid = inst->sid; + rc = msm_vidc_check_mbps_supported(inst); + if (rc) { + s_vpr_e(sid, "%s: Hardware is overloaded\n", __func__); + return rc; + } + + rc = msm_vidc_check_mbpf_supported(inst); + if (rc) + return rc; + + if (!is_thermal_permissible(core)) { + s_vpr_e(sid, + "Thermal level critical, stop all active sessions!\n"); + return -ENOTSUPP; + } + + if (is_secure_session(inst)) { + width_min = capability->cap[CAP_SECURE_FRAME_WIDTH].min; + width_max = capability->cap[CAP_SECURE_FRAME_WIDTH].max; + height_min = capability->cap[CAP_SECURE_FRAME_HEIGHT].min; + 
height_max = capability->cap[CAP_SECURE_FRAME_HEIGHT].max; + mbpf_max = capability->cap[CAP_SECURE_MBS_PER_FRAME].max; + } else { + width_min = capability->cap[CAP_FRAME_WIDTH].min; + width_max = capability->cap[CAP_FRAME_WIDTH].max; + height_min = capability->cap[CAP_FRAME_HEIGHT].min; + height_max = capability->cap[CAP_FRAME_HEIGHT].max; + mbpf_max = capability->cap[CAP_MBS_PER_FRAME].max; + } + + if (inst->session_type == MSM_VIDC_ENCODER && + inst->rc_type == RATE_CONTROL_LOSSLESS) { + width_min = capability->cap[CAP_LOSSLESS_FRAME_WIDTH].min; + width_max = capability->cap[CAP_LOSSLESS_FRAME_WIDTH].max; + height_min = capability->cap[CAP_LOSSLESS_FRAME_HEIGHT].min; + height_max = capability->cap[CAP_LOSSLESS_FRAME_HEIGHT].max; + mbpf_max = capability->cap[CAP_LOSSLESS_MBS_PER_FRAME].max; + } + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + output_height = f->fmt.pix_mp.height; + output_width = f->fmt.pix_mp.width; + f = &inst->fmts[INPUT_PORT].v4l2_fmt; + input_height = f->fmt.pix_mp.height; + input_width = f->fmt.pix_mp.width; + + if (is_image_session(inst)) { + if (is_secure_session(inst)) { + s_vpr_e(sid, "Secure image encode isn't supported!\n"); + return -ENOTSUPP; + } + + if (is_grid_session(inst)) { + if (inst->fmts[INPUT_PORT].v4l2_fmt.fmt.pix_mp.pixelformat != + V4L2_PIX_FMT_NV12 && + inst->fmts[INPUT_PORT].v4l2_fmt.fmt.pix_mp.pixelformat != + V4L2_PIX_FMT_NV12_512) + return -ENOTSUPP; + + width_min = + capability->cap[CAP_HEIC_IMAGE_FRAME_WIDTH].min; + width_max = + capability->cap[CAP_HEIC_IMAGE_FRAME_WIDTH].max; + height_min = + capability->cap[CAP_HEIC_IMAGE_FRAME_HEIGHT].min; + height_max = + capability->cap[CAP_HEIC_IMAGE_FRAME_HEIGHT].max; + mbpf_max = capability->cap[CAP_MBS_PER_FRAME].max; + + input_height = ALIGN(input_height, 512); + input_width = ALIGN(input_width, 512); + output_height = input_height; + output_width = input_width; + } else { + width_min = + capability->cap[CAP_HEVC_IMAGE_FRAME_WIDTH].min; + width_max = + 
capability->cap[CAP_HEVC_IMAGE_FRAME_WIDTH].max; + height_min = + capability->cap[CAP_HEVC_IMAGE_FRAME_HEIGHT].min; + height_max = + capability->cap[CAP_HEVC_IMAGE_FRAME_HEIGHT].max; + mbpf_max = capability->cap[CAP_MBS_PER_FRAME].max; + } + } + + if (inst->session_type == MSM_VIDC_ENCODER && (input_width % 2 != 0 || + input_height % 2 != 0 || output_width % 2 != 0 || + output_height % 2 != 0)) { + s_vpr_e(sid, + "Height and Width should be even numbers for NV12\n"); + s_vpr_e(sid, "Input WxH = (%u)x(%u), Output WxH = (%u)x(%u)\n", + input_width, input_height, + output_width, output_height); + rc = -ENOTSUPP; + } + + output_height = ALIGN(output_height, 16); + output_width = ALIGN(output_width, 16); + + if (!rc) { + if (output_width < width_min || + output_height < height_min) { + s_vpr_e(sid, + "Unsupported WxH (%u)x(%u), min supported is (%u)x(%u)\n", + output_width, output_height, + width_min, height_min); + rc = -ENOTSUPP; + } + if (!rc && (output_width > width_max || + output_height > height_max)) { + s_vpr_e(sid, + "Unsupported WxH (%u)x(%u), max supported is (%u)x(%u)\n", + output_width, output_height, + width_max, height_max); + rc = -ENOTSUPP; + } + + if (!rc && output_height * output_width > + width_max * height_max) { + s_vpr_e(sid, + "Unsupported WxH = (%u)x(%u), max supported is (%u)x(%u)\n", + output_width, output_height, + width_max, height_max); + rc = -ENOTSUPP; + } + /* Image size max capability has equal width and height, + * hence, don't check mbpf for image sessions. 
+ */ + if (!rc && !(is_image_session(inst) || + is_grid_session(inst)) && + NUM_MBS_PER_FRAME(input_width, input_height) > + mbpf_max) { + s_vpr_e(sid, "Unsupported mbpf %d, max %d\n", + NUM_MBS_PER_FRAME(input_width, input_height), + mbpf_max); + rc = -ENOTSUPP; + } + if (!rc && inst->pic_struct != + MSM_VIDC_PIC_STRUCT_PROGRESSIVE && + (output_width > INTERLACE_WIDTH_MAX || + output_height > INTERLACE_HEIGHT_MAX || + (NUM_MBS_PER_FRAME(output_height, output_width) > + INTERLACE_MB_PER_FRAME_MAX))) { + s_vpr_e(sid, + "Unsupported interlace WxH = (%u)x(%u), max supported is (%u)x(%u)\n", + output_width, output_height, + INTERLACE_WIDTH_MAX, + INTERLACE_HEIGHT_MAX); + rc = -ENOTSUPP; + } + } + if (rc) { + s_vpr_e(sid, "%s: Resolution unsupported\n", __func__); + } + return rc; +} + +void msm_comm_generate_session_error(struct msm_vidc_inst *inst) +{ + enum hal_command_response cmd = HAL_SESSION_ERROR; + struct msm_vidc_cb_cmd_done response = {0}; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid input parameters\n", __func__); + return; + } + s_vpr_e(inst->sid, "%s: inst %pK\n", __func__, inst); + response.inst_id = inst; + response.status = VIDC_ERR_FAIL; + handle_session_error(cmd, (void *)&response); +} + +void msm_comm_generate_sys_error(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core; + enum hal_command_response cmd = HAL_SYS_ERROR; + struct msm_vidc_cb_cmd_done response = {0}; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid input parameters\n", __func__); + return; + } + s_vpr_e(inst->sid, "%s: inst %pK\n", __func__, inst); + core = inst->core; + response.device_id = (u32) core->id; + handle_sys_error(cmd, (void *) &response); + +} + +int msm_comm_kill_session(struct msm_vidc_inst *inst) +{ + int rc = 0; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid input parameters\n", __func__); + return -EINVAL; + } else if (!inst->session) { + s_vpr_e(inst->sid, "%s: no session to kill for inst %pK\n", + __func__, 
inst); + return 0; + } + + s_vpr_e(inst->sid, "%s: inst %pK, state %d\n", __func__, + inst, inst->state); + /* + * We're internally forcibly killing the session, if fw is aware of + * the session send session_abort to firmware to clean up and release + * the session, else just kill the session inside the driver. + */ + if ((inst->state >= MSM_VIDC_OPEN_DONE && + inst->state < MSM_VIDC_CLOSE_DONE) || + inst->state == MSM_VIDC_CORE_INVALID) { + rc = msm_comm_session_abort(inst); + if (rc) { + s_vpr_e(inst->sid, + "%s: inst %pK session abort failed\n", + __func__, inst); + change_inst_state(inst, MSM_VIDC_CORE_INVALID); + } + } + + change_inst_state(inst, MSM_VIDC_CLOSE_DONE); + msm_comm_session_clean(inst); + + s_vpr_e(inst->sid, "%s: inst %pK handled\n", __func__, + inst); + return rc; +} + +int msm_comm_smem_alloc(struct msm_vidc_inst *inst, + size_t size, u32 align, u32 flags, enum hal_buffer buffer_type, + int map_kernel, struct msm_smem *smem) +{ + int rc = 0; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid inst: %pK\n", __func__, inst); + return -EINVAL; + } + rc = msm_smem_alloc(size, align, flags, buffer_type, map_kernel, + &(inst->core->resources), inst->session_type, + smem, inst->sid); + return rc; +} + +void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem) +{ + if (!inst || !inst->core || !mem) { + d_vpr_e("%s: invalid params: %pK %pK\n", + __func__, inst, mem); + return; + } + msm_smem_free(mem, inst->sid); +} + +void msm_vidc_fw_unload_handler(struct work_struct *work) +{ + struct msm_vidc_core *core = NULL; + struct hfi_device *hdev = NULL; + int rc = 0; + + core = container_of(work, struct msm_vidc_core, fw_unload_work.work); + if (!core || !core->device) { + d_vpr_e("%s: invalid work or core handle\n", __func__); + return; + } + + hdev = core->device; + + mutex_lock(&core->lock); + if (list_empty(&core->instances) && + core->state != VIDC_CORE_UNINIT) { + if (core->state > VIDC_CORE_INIT) { + d_vpr_h("Calling 
vidc_hal_core_release\n"); + rc = call_hfi_op(hdev, core_release, + hdev->hfi_device_data); + if (rc) { + d_vpr_e("Failed to release core, id = %d\n", + core->id); + mutex_unlock(&core->lock); + return; + } + } + core->state = VIDC_CORE_UNINIT; + kfree(core->capabilities); + core->capabilities = NULL; + } + mutex_unlock(&core->lock); +} + +int msm_comm_set_color_format(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, int fourcc) +{ + struct hfi_uncompressed_format_select hfi_fmt = {0}; + u32 format = HFI_COLOR_FORMAT_NV12_UBWC; + int rc = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + hdev = inst->core->device; + + format = msm_comm_get_hfi_uncompressed(fourcc, inst->sid); + hfi_fmt.buffer_type = get_hfi_buffer(buffer_type, inst->sid); + hfi_fmt.format = format; + s_vpr_h(inst->sid, "buffer_type %#x, format %#x\n", + hfi_fmt.buffer_type, hfi_fmt.format); + rc = call_hfi_op(hdev, session_set_property, inst->session, + HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT, &hfi_fmt, + sizeof(hfi_fmt)); + if (rc) + s_vpr_e(inst->sid, "Failed to set input color format\n"); + else + s_vpr_h(inst->sid, "Setting uncompressed colorformat to %#x\n", + format); + + return rc; +} + +void msm_comm_print_inst_info(struct msm_vidc_inst *inst) +{ + struct msm_vidc_buffer *mbuf; + struct dma_buf *dbuf; + struct internal_buf *buf; + bool is_decode = false; + enum vidc_ports port; + bool is_secure = false; + struct v4l2_format *f; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + is_decode = inst->session_type == MSM_VIDC_DECODER; + port = is_decode ? INPUT_PORT : OUTPUT_PORT; + is_secure = inst->flags & VIDC_SECURE; + f = &inst->fmts[port].v4l2_fmt; + s_vpr_e(inst->sid, + "%s session, %s, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n", + is_decode ? "Decode" : "Encode", + is_secure ? 
"Secure" : "Non-Secure", + inst->fmts[port].name, + f->fmt.pix_mp.height, f->fmt.pix_mp.width, + inst->clk_data.frame_rate >> 16, inst->prop.bitrate, + !inst->bit_depth ? "8" : "10"); + s_vpr_e(inst->sid, "---Buffer details for inst: %pK of type: %d---\n", + inst, inst->session_type); + mutex_lock(&inst->registeredbufs.lock); + s_vpr_e(inst->sid, "registered buffer list:\n"); + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) + print_vidc_buffer(VIDC_ERR, "buf", inst, mbuf); + mutex_unlock(&inst->registeredbufs.lock); + + mutex_lock(&inst->scratchbufs.lock); + s_vpr_e(inst->sid, "scratch buffer list:\n"); + list_for_each_entry(buf, &inst->scratchbufs.list, list) { + dbuf = (struct dma_buf *)buf->smem.dma_buf; + s_vpr_e(inst->sid, "type: %d addr: %x size: %u inode: %lu ref: %ld\n", + buf->buffer_type, buf->smem.device_addr, buf->smem.size, + (dbuf ? file_inode(dbuf->file)->i_ino : -1), + (dbuf ? file_count(dbuf->file) : -1)); + } + mutex_unlock(&inst->scratchbufs.lock); + + mutex_lock(&inst->persistbufs.lock); + s_vpr_e(inst->sid, "persist buffer list:\n"); + list_for_each_entry(buf, &inst->persistbufs.list, list) { + dbuf = (struct dma_buf *)buf->smem.dma_buf; + s_vpr_e(inst->sid, "type: %d addr: %x size: %u inode: %lu ref: %ld\n", + buf->buffer_type, buf->smem.device_addr, buf->smem.size, + (dbuf ? file_inode(dbuf->file)->i_ino : -1), + (dbuf ? file_count(dbuf->file) : -1)); + } + mutex_unlock(&inst->persistbufs.lock); + + mutex_lock(&inst->outputbufs.lock); + s_vpr_e(inst->sid, "dpb buffer list:\n"); + list_for_each_entry(buf, &inst->outputbufs.list, list) { + dbuf = (struct dma_buf *)buf->smem.dma_buf; + s_vpr_e(inst->sid, "type: %d addr: %x size: %u inode: %lu ref: %ld\n", + buf->buffer_type, buf->smem.device_addr, buf->smem.size, + (dbuf ? file_inode(dbuf->file)->i_ino : -1), + (dbuf ? 
file_count(dbuf->file) : -1)); + } + mutex_unlock(&inst->outputbufs.lock); +} + +void msm_comm_print_insts_info(struct msm_vidc_core *core) +{ + struct msm_vidc_inst *inst = NULL; + + if (!core) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + msm_comm_print_mem_usage(core); + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) + msm_comm_print_inst_info(inst); + mutex_unlock(&core->lock); +} + +int msm_comm_session_continue(void *instance) +{ + struct msm_vidc_inst *inst = instance; + int rc = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) + return -EINVAL; + hdev = inst->core->device; + mutex_lock(&inst->lock); + if (inst->state >= MSM_VIDC_RELEASE_RESOURCES_DONE || + inst->state < MSM_VIDC_START_DONE) { + s_vpr_h(inst->sid, "Inst %pK : Not in valid state to call %s\n", + inst, __func__); + goto sess_continue_fail; + } + if (inst->session_type == MSM_VIDC_DECODER && inst->in_reconfig) { + s_vpr_h(inst->sid, "send session_continue\n"); + rc = call_hfi_op(hdev, session_continue, + (void *)inst->session); + if (rc) { + s_vpr_e(inst->sid, + "failed to send session_continue\n"); + rc = -EINVAL; + goto sess_continue_fail; + } + inst->in_reconfig = false; + + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_SECONDARY) { + rc = msm_comm_queue_dpb_only_buffers(inst); + if (rc) { + s_vpr_e(inst->sid, + "Failed to queue output buffers\n"); + goto sess_continue_fail; + } + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + s_vpr_h(inst->sid, + "session_continue not supported for encoder"); + } else { + s_vpr_e(inst->sid, + "session_continue called in wrong state for decoder"); + } + +sess_continue_fail: + mutex_unlock(&inst->lock); + return rc; +} + +void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + struct vb2_buffer *vb2 = NULL; + struct dma_buf *dbuf[2]; + + if (!(tag & msm_vidc_debug) || !inst || !mbuf) + 
return; + + vb2 = &mbuf->vvb.vb2_buf; + dbuf[0] = (struct dma_buf *)mbuf->smem[0].dma_buf; + dbuf[1] = (struct dma_buf *)mbuf->smem[1].dma_buf; + + if (vb2->num_planes == 1) + dprintk(tag, inst->sid, + "%s: %s: idx %2d fd %d off %d daddr %x inode %lu ref %ld size %d filled %d flags 0x%x ts %lld refcnt %d mflags 0x%x\n", + str, vb2->type == INPUT_MPLANE ? + "OUTPUT" : "CAPTURE", + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, mbuf->smem[0].device_addr, + (dbuf[0] ? file_inode(dbuf[0]->file)->i_ino : -1), + (dbuf[0] ? file_count(dbuf[0]->file) : -1), + vb2->planes[0].length, vb2->planes[0].bytesused, + mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp, + mbuf->smem[0].refcount, mbuf->flags); + else + dprintk(tag, inst->sid, + "%s: %s: idx %2d fd %d off %d daddr %x inode %lu ref %ld size %d filled %d flags 0x%x ts %lld refcnt %d mflags 0x%x, extradata: fd %d off %d daddr %x inode %lu ref %ld size %d filled %d refcnt %d\n", + str, vb2->type == INPUT_MPLANE ? + "OUTPUT" : "CAPTURE", + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, mbuf->smem[0].device_addr, + (dbuf[0] ? file_inode(dbuf[0]->file)->i_ino : -1), + (dbuf[0] ? file_count(dbuf[0]->file) : -1), + vb2->planes[0].length, vb2->planes[0].bytesused, + mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp, + mbuf->smem[0].refcount, mbuf->flags, + vb2->planes[1].m.fd, vb2->planes[1].data_offset, + mbuf->smem[1].device_addr, + (dbuf[1] ? file_inode(dbuf[1]->file)->i_ino : -1), + (dbuf[1] ? file_count(dbuf[1]->file) : -1), vb2->planes[1].length, + vb2->planes[1].bytesused, mbuf->smem[1].refcount); +} + +void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst, + struct vb2_buffer *vb2) +{ + if (!inst || !vb2) + return; + + if (vb2->num_planes == 1) + s_vpr_e(inst->sid, + "%s: %s: idx %2d fd %d off %d size %d filled %d\n", + str, vb2->type == INPUT_MPLANE ? 
"OUTPUT" : "CAPTURE", + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, vb2->planes[0].length, + vb2->planes[0].bytesused); + else + s_vpr_e(inst->sid, + "%s: %s: idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d filled %d\n", + str, vb2->type == INPUT_MPLANE ? "OUTPUT" : "CAPTURE", + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, vb2->planes[0].length, + vb2->planes[0].bytesused, vb2->planes[1].m.fd, + vb2->planes[1].data_offset, vb2->planes[1].length, + vb2->planes[1].bytesused); +} + +bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i) +{ + struct vb2_buffer *vb; + + if (!inst || !mbuf || !vb2) { + d_vpr_e("%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, vb2); + return false; + } + + vb = &mbuf->vvb.vb2_buf; + if (vb->planes[i].m.fd == vb2->planes[i].m.fd && + vb->planes[i].length == vb2->planes[i].length) { + return true; + } + + return false; +} + +bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2) +{ + unsigned int i = 0; + struct vb2_buffer *vb; + + if (!inst || !mbuf || !vb2) { + d_vpr_e("%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, vb2); + return false; + } + + vb = &mbuf->vvb.vb2_buf; + + if (vb->num_planes != vb2->num_planes) + return false; + + for (i = 0; i < vb->num_planes; i++) { + if (!msm_comm_compare_vb2_plane(inst, mbuf, vb2, i)) + return false; + } + + return true; +} + +bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i) +{ + if (!inst || !mbuf || !dma_planes) { + d_vpr_e("%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, dma_planes); + return false; + } + + if ((unsigned long)mbuf->smem[i].dma_buf == dma_planes[i]) + return true; + + return false; +} + +bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, 
unsigned long *dma_planes) +{ + unsigned int i = 0; + struct vb2_buffer *vb; + + if (!inst || !mbuf || !dma_planes) { + d_vpr_e("%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, dma_planes); + return false; + } + + vb = &mbuf->vvb.vb2_buf; + for (i = 0; i < vb->num_planes; i++) { + if (!msm_comm_compare_dma_plane(inst, mbuf, dma_planes, i)) + return false; + } + + return true; +} + + +bool msm_comm_compare_device_plane(u32 sid, struct msm_vidc_buffer *mbuf, + u32 type, u32 *planes, u32 i) +{ + if (!mbuf || !planes) { + s_vpr_e(sid, "%s: invalid params, %pK %pK\n", + __func__, mbuf, planes); + return false; + } + + if (mbuf->vvb.vb2_buf.type == type && + mbuf->smem[i].device_addr == planes[i]) + return true; + + return false; +} + +bool msm_comm_compare_device_planes(u32 sid, struct msm_vidc_buffer *mbuf, + u32 type, u32 *planes) +{ + unsigned int i = 0; + + if (!mbuf || !planes) + return false; + + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + if (!msm_comm_compare_device_plane(sid, mbuf, type, planes, i)) + return false; + } + + return true; +} + +struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes( + struct msm_vidc_inst *inst, u32 type, u32 *planes) +{ + struct msm_vidc_buffer *mbuf; + bool found = false; + + mutex_lock(&inst->registeredbufs.lock); + found = false; + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (msm_comm_compare_device_planes(inst->sid, mbuf, + type, planes)) { + found = true; + break; + } + } + mutex_unlock(&inst->registeredbufs.lock); + if (!found) { + s_vpr_e(inst->sid, + "%s: data_addr %x, extradata_addr %x not found\n", + __func__, planes[0], planes[1]); + mbuf = NULL; + } + + return mbuf; +} + +int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + struct vb2_buffer *vb; + u32 port; + + if (!inst || !mbuf) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + + vb = msm_comm_get_vb_using_vidc_buffer(inst, 
mbuf); + if (!vb) { + print_vidc_buffer(VIDC_ERR, + "vb not found for buf", inst, mbuf); + return -EINVAL; + } + + if (mbuf->vvb.vb2_buf.type == OUTPUT_MPLANE) + port = OUTPUT_PORT; + else if (mbuf->vvb.vb2_buf.type == INPUT_MPLANE) + port = INPUT_PORT; + else + return -EINVAL; + + if (inst->bufq[port].vb2_bufq.streaming) { + vb->planes[0].bytesused = 0; + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } else { + s_vpr_e(inst->sid, "%s: port %d is not streaming\n", + __func__, port); + } + + return 0; +} + +int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + unsigned int i; + struct vb2_buffer *vb; + bool skip; + + if (!inst || !mbuf) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + vb = &mbuf->vvb.vb2_buf; + + for (i = 0; i < vb->num_planes; i++) { + unsigned long offset, size; + enum smem_cache_ops cache_op; + + offset = vb->planes[i].data_offset; + size = vb->planes[i].length - offset; + cache_op = SMEM_CACHE_INVALIDATE; + skip = false; + + if (inst->session_type == MSM_VIDC_DECODER) { + if (vb->type == INPUT_MPLANE) { + if (!i) { /* bitstream */ + size = vb->planes[i].bytesused; + cache_op = SMEM_CACHE_CLEAN_INVALIDATE; + } + } else if (vb->type == OUTPUT_MPLANE) { + if (!i) { /* yuv */ + /* all values are correct */ + } + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + if (vb->type == INPUT_MPLANE) { + if (!i) { /* yuv */ + size = vb->planes[i].bytesused; + cache_op = SMEM_CACHE_CLEAN_INVALIDATE; + } else { /* extradata */ + cache_op = SMEM_CACHE_CLEAN_INVALIDATE; + } + } else if (vb->type == OUTPUT_MPLANE) { + if (!i && inst->max_filled_len) + size = inst->max_filled_len; + } + } + + if (!skip) { + rc = msm_smem_cache_operations(mbuf->smem[i].dma_buf, + cache_op, offset, size, inst->sid); + if (rc) + print_vidc_buffer(VIDC_ERR, + "qbuf cache ops failed", inst, mbuf); + } + } + + return rc; +} + +int msm_comm_dqbuf_cache_operations(struct 
msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + unsigned int i; + struct vb2_buffer *vb; + bool skip; + + if (!inst || !mbuf) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + vb = &mbuf->vvb.vb2_buf; + + for (i = 0; i < vb->num_planes; i++) { + unsigned long offset, size; + enum smem_cache_ops cache_op; + + offset = vb->planes[i].data_offset; + size = vb->planes[i].length - offset; + cache_op = SMEM_CACHE_INVALIDATE; + skip = false; + + if (inst->session_type == MSM_VIDC_DECODER) { + if (vb->type == INPUT_MPLANE) { + if (!i) /* bitstream */ + skip = true; + } else if (vb->type == OUTPUT_MPLANE) { + if (!i) { /* yuv */ + /* All values are correct */ + } + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + if (vb->type == INPUT_MPLANE) { + /* yuv and extradata */ + skip = true; + } else if (vb->type == OUTPUT_MPLANE) { + if (!i) { /* bitstream */ + /* + * Include vp8e header bytes as well + * by making offset equal to zero + */ + offset = 0; + size = vb->planes[i].bytesused + + vb->planes[i].data_offset; + } + } + } + + if (!skip) { + rc = msm_smem_cache_operations(mbuf->smem[i].dma_buf, + cache_op, offset, size, inst->sid); + if (rc) + print_vidc_buffer(VIDC_ERR, + "dqbuf cache ops failed", inst, mbuf); + } + } + + return rc; +} + +struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, + struct vb2_buffer *vb2) +{ + int rc = 0; + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + unsigned long dma_planes[VB2_MAX_PLANES] = {0}; + struct msm_vidc_buffer *mbuf; + bool found = false; + unsigned int i; + + if (!inst || !vb2) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, vb2); + return NULL; + } + + for (i = 0; i < vb2->num_planes; i++) { + /* + * always compare dma_buf addresses which is guaranteed + * to be same across the processes (duplicate fds). 
+ */ + dma_planes[i] = (unsigned long)msm_smem_get_dma_buf( + vb2->planes[i].m.fd, inst->sid); + if (!dma_planes[i]) + return NULL; + msm_smem_put_dma_buf((struct dma_buf *)dma_planes[i], + inst->sid); + } + + mutex_lock(&inst->registeredbufs.lock); + /* + * for encoder input, client may queue the same buffer with different + * fd before driver returned old buffer to the client. This buffer + * should be treated as new buffer Search the list with fd so that + * it will be treated as new msm_vidc_buffer. + */ + if (is_encode_session(inst) && vb2->type == INPUT_MPLANE) { + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (msm_comm_compare_vb2_planes(inst, mbuf, vb2)) { + found = true; + break; + } + } + } else { + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (msm_comm_compare_dma_planes(inst, mbuf, + dma_planes)) { + found = true; + break; + } + } + } + + if (!found) { + /* this is new vb2_buffer */ + mbuf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL); + if (!mbuf) { + s_vpr_e(inst->sid, "%s: alloc msm_vidc_buffer failed\n", + __func__); + rc = -ENOMEM; + goto exit; + } + kref_init(&mbuf->kref); + } + + /* Initially assume all the buffer are going to be deferred */ + mbuf->flags |= MSM_VIDC_FLAG_DEFERRED; + + vbuf = to_vb2_v4l2_buffer(vb2); + memcpy(&mbuf->vvb, vbuf, sizeof(struct vb2_v4l2_buffer)); + vb = &mbuf->vvb.vb2_buf; + + for (i = 0; i < vb->num_planes; i++) { + mbuf->smem[i].buffer_type = get_hal_buffer_type(vb->type, i); + mbuf->smem[i].fd = vb->planes[i].m.fd; + mbuf->smem[i].offset = vb->planes[i].data_offset; + mbuf->smem[i].size = vb->planes[i].length; + rc = inst->smem_ops->smem_map_dma_buf(inst, &mbuf->smem[i]); + if (rc) { + s_vpr_e(inst->sid, "%s: map failed.\n", __func__); + goto exit; + } + /* increase refcount as we get both fbd and rbr */ + rc = inst->smem_ops->smem_map_dma_buf(inst, &mbuf->smem[i]); + if (rc) { + s_vpr_e(inst->sid, "%s: map failed..\n", __func__); + goto exit; + } + } + /* dma 
cache operations need to be performed after dma_map */ + msm_comm_qbuf_cache_operations(inst, mbuf); + + /* special handling for decoder */ + if (inst->session_type == MSM_VIDC_DECODER) { + if (found) { + rc = -EEXIST; + } else { + bool found_plane0 = false; + struct msm_vidc_buffer *temp; + /* + * client might have queued same plane[0] but different + * plane[1] search plane[0] and if found don't queue the + * buffer, the buffer will be queued when rbr event + * arrived. + */ + list_for_each_entry(temp, &inst->registeredbufs.list, + list) { + if (msm_comm_compare_dma_plane(inst, temp, + dma_planes, 0)) { + found_plane0 = true; + break; + } + } + if (found_plane0) + rc = -EEXIST; + } + if (rc == -EEXIST) { + print_vidc_buffer(VIDC_HIGH, + "existing qbuf", inst, mbuf); + /* enable RBR pending */ + mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING; + } + } + + /* add the new buffer to list */ + if (!found) + list_add_tail(&mbuf->list, &inst->registeredbufs.list); + + mutex_unlock(&inst->registeredbufs.lock); + + /* + * Return mbuf if decode batching is enabled as this buffer + * may trigger queuing full batch to firmware, also this buffer + * will not be queued to firmware while full batch queuing, + * it will be queued when rbr event arrived from firmware. 
+ */ + if (rc == -EEXIST && !inst->batch.enable) + return ERR_PTR(rc); + + return mbuf; + +exit: + s_vpr_e(inst->sid, "%s: %d\n", __func__, rc); + msm_comm_unmap_vidc_buffer(inst, mbuf); + if (!found) + kref_put_mbuf(mbuf); + mutex_unlock(&inst->registeredbufs.lock); + + return ERR_PTR(rc); +} + +void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + struct msm_vidc_buffer *temp; + bool found = false; + unsigned int i = 0; + + if (!inst || !mbuf) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return; + } + + mutex_lock(&inst->registeredbufs.lock); + /* check if mbuf was not removed by any chance */ + list_for_each_entry(temp, &inst->registeredbufs.list, list) { + if (msm_comm_compare_vb2_planes(inst, mbuf, + &temp->vvb.vb2_buf)) { + found = true; + break; + } + } + if (!found) { + print_vidc_buffer(VIDC_ERR, "buf was removed", inst, mbuf); + goto unlock; + } + + print_vidc_buffer(VIDC_HIGH, "dqbuf", inst, mbuf); + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + if (inst->smem_ops->smem_unmap_dma_buf(inst, &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "dqbuf: unmap failed.", inst, mbuf); + + if (!(mbuf->vvb.flags & V4L2_BUF_FLAG_READONLY)) { + /* rbr won't come for this buffer */ + if (inst->smem_ops->smem_unmap_dma_buf(inst, + &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "dqbuf: unmap failed..", inst, mbuf); + } else { + /* RBR event expected */ + mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING; + } + } + /* + * remove the entry if plane[0].refcount is zero else + * don't remove as client queued same buffer that's why + * plane[0].refcount is not zero + */ + if (!mbuf->smem[0].refcount) { + list_del(&mbuf->list); + kref_put_mbuf(mbuf); + } +unlock: + mutex_unlock(&inst->registeredbufs.lock); +} + +void handle_release_buffer_reference(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + struct msm_vidc_buffer *temp; + bool found = false; + unsigned int i = 0; + u32 
planes[VIDEO_MAX_PLANES] = {0}; + + mutex_lock(&inst->bufq[OUTPUT_PORT].lock); + mutex_lock(&inst->registeredbufs.lock); + found = false; + /* check if mbuf was not removed by any chance */ + list_for_each_entry(temp, &inst->registeredbufs.list, list) { + if (msm_comm_compare_vb2_planes(inst, mbuf, + &temp->vvb.vb2_buf)) { + found = true; + break; + } + } + if (found) { + /* save device_addr */ + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) + planes[i] = mbuf->smem[i].device_addr; + + /* send RBR event to client */ + msm_vidc_queue_rbr_event(inst, + mbuf->vvb.vb2_buf.planes[0].m.fd, + mbuf->vvb.vb2_buf.planes[0].data_offset); + + /* clear RBR_PENDING flag */ + mbuf->flags &= ~MSM_VIDC_FLAG_RBR_PENDING; + + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + if (inst->smem_ops->smem_unmap_dma_buf(inst, + &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "rbr unmap failed.", inst, mbuf); + } + /* refcount is not zero if client queued the same buffer */ + if (!mbuf->smem[0].refcount) { + list_del(&mbuf->list); + kref_put_mbuf(mbuf); + mbuf = NULL; + } + } else { + print_vidc_buffer(VIDC_ERR, "mbuf not found", inst, mbuf); + goto unlock; + } + + /* + * 1. client might have pushed same planes in which case mbuf will be + * same and refcounts are positive and buffer wouldn't have been + * removed from the registeredbufs list. + * 2. client might have pushed same planes[0] but different planes[1] + * in which case mbuf will be different. + * 3. in either case we can search mbuf->smem[0].device_addr in the list + * and if found queue it to video hw (if not flushing). 
+ */ + found = false; + list_for_each_entry(temp, &inst->registeredbufs.list, list) { + if (msm_comm_compare_device_plane(inst->sid, temp, + OUTPUT_MPLANE, planes, 0)) { + mbuf = temp; + found = true; + break; + } + } + if (!found) + goto unlock; + + /* buffer found means client queued the buffer already */ + if (inst->in_reconfig || inst->out_flush) { + print_vidc_buffer(VIDC_HIGH, "rbr flush buf", inst, mbuf); + msm_comm_flush_vidc_buffer(inst, mbuf); + msm_comm_unmap_vidc_buffer(inst, mbuf); + /* remove from list */ + list_del(&mbuf->list); + kref_put_mbuf(mbuf); + + /* don't queue the buffer */ + found = false; + } + /* clear required flags as the buffer is going to be queued */ + if (found) { + mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED; + mbuf->flags &= ~MSM_VIDC_FLAG_RBR_PENDING; + } + +unlock: + mutex_unlock(&inst->registeredbufs.lock); + + if (found) { + rc = msm_comm_qbuf_in_rbr(inst, mbuf); + if (rc) + print_vidc_buffer(VIDC_ERR, + "rbr qbuf failed", inst, mbuf); + } + mutex_unlock(&inst->bufq[OUTPUT_PORT].lock); +} + +int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0; + unsigned int i; + + if (!inst || !mbuf) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + if (mbuf->vvb.vb2_buf.num_planes > VIDEO_MAX_PLANES) { + s_vpr_e(inst->sid, "%s: invalid num_planes %d\n", __func__, + mbuf->vvb.vb2_buf.num_planes); + return -EINVAL; + } + + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + u32 refcount = mbuf->smem[i].refcount; + + while (refcount) { + if (inst->smem_ops->smem_unmap_dma_buf(inst, + &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "unmap failed for buf", inst, mbuf); + refcount--; + } + } + + return rc; +} + +static void kref_free_mbuf(struct kref *kref) +{ + struct msm_vidc_buffer *mbuf = container_of(kref, + struct msm_vidc_buffer, kref); + + kfree(mbuf); +} + +void kref_put_mbuf(struct msm_vidc_buffer *mbuf) +{ + if (!mbuf) + return; + + 
kref_put(&mbuf->kref, kref_free_mbuf); +} + +bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf) +{ + struct msm_vidc_buffer *temp; + bool matches = false; + bool ret = false; + + if (!inst || !mbuf) + return false; + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(temp, &inst->registeredbufs.list, list) { + if (temp == mbuf) { + matches = true; + break; + } + } + ret = (matches && kref_get_unless_zero(&mbuf->kref)) ? true : false; + mutex_unlock(&inst->registeredbufs.lock); + + return ret; +} + +int msm_comm_store_input_tag(struct msm_vidc_list *data_list, + u32 index, u32 itag, u32 itag2, u32 sid) +{ + struct msm_vidc_buf_data *pdata = NULL; + bool found = false; + int rc = 0; + + if (!data_list) { + s_vpr_e(sid, "%s: invalid params\n", __func__); + return -EINVAL; + } + + mutex_lock(&data_list->lock); + list_for_each_entry(pdata, &data_list->list, list) { + if (pdata->index == index) { + pdata->input_tag = itag; + pdata->input_tag2 = itag2; + found = true; + break; + } + } + + if (!found) { + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + s_vpr_e(sid, "%s: malloc failure.\n", __func__); + rc = -ENOMEM; + goto exit; + } + pdata->index = index; + pdata->input_tag = itag; + pdata->input_tag2 = itag2; + list_add_tail(&pdata->list, &data_list->list); + } + +exit: + mutex_unlock(&data_list->lock); + + return rc; +} + +int msm_comm_fetch_input_tag(struct msm_vidc_list *data_list, + u32 index, u32 *itag, u32 *itag2, u32 sid) +{ + struct msm_vidc_buf_data *pdata = NULL; + int rc = 0; + + if (!data_list || !itag || !itag2) { + s_vpr_e(sid, "%s: invalid params %pK %pK %pK\n", + __func__, data_list, itag, itag2); + return -EINVAL; + } + + *itag = *itag2 = 0; + mutex_lock(&data_list->lock); + list_for_each_entry(pdata, &data_list->list, list) { + if (pdata->index == index) { + *itag = pdata->input_tag; + *itag2 = pdata->input_tag2; + /* clear after fetch */ + pdata->input_tag = pdata->input_tag2 = 0; + break; + } + } + 
mutex_unlock(&data_list->lock); + + return rc; +} + +int msm_comm_release_input_tag(struct msm_vidc_inst *inst) +{ + struct msm_vidc_buf_data *pdata, *next; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + mutex_lock(&inst->etb_data.lock); + list_for_each_entry_safe(pdata, next, &inst->etb_data.list, list) { + list_del(&pdata->list); + kfree(pdata); + } + mutex_unlock(&inst->etb_data.lock); + + mutex_lock(&inst->fbd_data.lock); + list_for_each_entry_safe(pdata, next, &inst->fbd_data.list, list) { + list_del(&pdata->list); + kfree(pdata); + } + mutex_unlock(&inst->fbd_data.lock); + + return 0; +} + +int msm_comm_set_color_format_constraints(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, + struct msm_vidc_format_constraint *pix_constraint) +{ + struct hfi_uncompressed_plane_actual_constraints_info + *pconstraint = NULL; + u32 num_planes = 2; + u32 size = 0; + int rc = 0; + struct hfi_device *hdev; + u32 hfi_fmt; + + if (!inst || !inst->core || !inst->core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, inst); + return -EINVAL; + } + + hdev = inst->core->device; + + size = 2 * sizeof(u32) + + num_planes + * sizeof(struct hfi_uncompressed_plane_constraints); + + pconstraint = kzalloc(size, GFP_KERNEL); + if (!pconstraint) { + s_vpr_e(inst->sid, "No memory cannot alloc constrain\n"); + rc = -ENOMEM; + goto exit; + } + + hfi_fmt = msm_comm_convert_color_fmt(pix_constraint->fourcc, inst->sid); + pconstraint->buffer_type = get_hfi_buffer(buffer_type, inst->sid); + pconstraint->num_planes = pix_constraint->num_planes; + //set Y plan constraints + s_vpr_h(inst->sid, "Set Y plan constraints.\n"); + pconstraint->rg_plane_format[0].stride_multiples = + VENUS_Y_STRIDE(hfi_fmt, 1); + pconstraint->rg_plane_format[0].max_stride = + pix_constraint->y_max_stride; + pconstraint->rg_plane_format[0].min_plane_buffer_height_multiple = + VENUS_Y_SCANLINES(hfi_fmt, 1); + pconstraint->rg_plane_format[0].buffer_alignment = + 
pix_constraint->y_buffer_alignment; + + //set UV plan constraints + s_vpr_h(inst->sid, "Set UV plan constraints.\n"); + pconstraint->rg_plane_format[1].stride_multiples = + VENUS_UV_STRIDE(hfi_fmt, 1); + pconstraint->rg_plane_format[1].max_stride = + pix_constraint->uv_max_stride; + pconstraint->rg_plane_format[1].min_plane_buffer_height_multiple = + VENUS_UV_SCANLINES(hfi_fmt, 1); + pconstraint->rg_plane_format[1].buffer_alignment = + pix_constraint->uv_buffer_alignment; + + rc = call_hfi_op(hdev, + session_set_property, + inst->session, + HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO, + pconstraint, + size); + if (rc) + s_vpr_e(inst->sid, + "Failed to set input color format constraint\n"); + else + s_vpr_h(inst->sid, "Set color format constraint success\n"); + +exit: + if (pconstraint) + kfree(pconstraint); + return rc; +} + +int msm_comm_set_index_extradata(struct msm_vidc_inst *inst, + uint32_t extradata_id, uint32_t value) +{ + int rc = 0; + struct hfi_index_extradata_config extradata; + struct hfi_device *hdev; + + hdev = inst->core->device; + + extradata.index_extra_data_id = extradata_id; + extradata.enable = value; + + rc = call_hfi_op(hdev, session_set_property, (void *) + inst->session, HFI_PROPERTY_PARAM_INDEX_EXTRADATA, &extradata, + sizeof(extradata)); + + return rc; +} + +int msm_comm_set_extradata(struct msm_vidc_inst *inst, + uint32_t extradata_id, uint32_t value) +{ + int rc = 0; + struct hfi_index_extradata_config extradata; + struct hfi_device *hdev; + + hdev = inst->core->device; + + extradata.index_extra_data_id = extradata_id; + extradata.enable = value; + + rc = call_hfi_op(hdev, session_set_property, (void *) + inst->session, extradata_id, &extradata, + sizeof(extradata)); + + return rc; +} + +int msm_comm_set_cvp_skip_ratio(struct msm_vidc_inst *inst, + uint32_t capture_rate, uint32_t cvp_rate) +{ + int rc = 0; + struct hfi_cvp_skip_ratio cvp_data; + struct hfi_device *hdev; + u32 integral_part, fractional_part, skip_ratio; 
+ + hdev = inst->core->device; + + skip_ratio = 0; + integral_part = ((capture_rate / cvp_rate) << 16); + fractional_part = capture_rate % cvp_rate; + if (fractional_part) { + fractional_part = (fractional_part * 100) / cvp_rate; + skip_ratio = integral_part | ((fractional_part << 16)/100) ; + } + else + skip_ratio = integral_part; + + cvp_data.cvp_skip_ratio = skip_ratio; + rc = call_hfi_op(hdev, session_set_property, (void *) + inst->session, HFI_PROPERTY_CONFIG_CVP_SKIP_RATIO, &cvp_data, + sizeof(cvp_data)); + + return rc; +} + + +bool msm_comm_check_for_inst_overload(struct msm_vidc_core *core) +{ + u32 instance_count = 0; + u32 secure_instance_count = 0; + struct msm_vidc_inst *inst = NULL; + bool overload = false; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + instance_count++; + if (inst->flags & VIDC_SECURE) + secure_instance_count++; + } + mutex_unlock(&core->lock); + + if (instance_count > core->resources.max_inst_count || + secure_instance_count > core->resources.max_secure_inst_count) { + overload = true; + d_vpr_e( + "%s: inst_count:%u max_inst:%u sec_inst_count:%u max_sec_inst:%u\n", + __func__, instance_count, + core->resources.max_inst_count, secure_instance_count, + core->resources.max_secure_inst_count); + } + return overload; +} + +int msm_comm_check_window_bitrate(struct msm_vidc_inst *inst, + struct vidc_frame_data *frame_data) +{ + struct msm_vidc_window_data *pdata, *temp = NULL; + u32 frame_size, window_size, window_buffer; + u32 max_avg_frame_size, max_frame_size; + int buf_cnt = 1, fps, window_start; + + if (!inst || !inst->core || !frame_data) { + d_vpr_e("%s: Invalid arguments\n", __func__); + return -EINVAL; + } + + if (!inst->core->resources.avsync_window_size || + inst->entropy_mode == HFI_H264_ENTROPY_CAVLC || + !frame_data->filled_len) + return 0; + + fps = inst->clk_data.frame_rate >> 16; + window_size = inst->core->resources.avsync_window_size * fps; + window_size = 
DIV_ROUND_CLOSEST(window_size, 1000); + window_buffer = inst->clk_data.work_mode == HFI_WORKMODE_2 ? 2 : 0; + + max_frame_size = + inst->core->resources.allowed_clks_tbl[0].clock_rate / fps - + inst->clk_data.entry->vsp_cycles * + msm_vidc_get_mbs_per_frame(inst); + max_avg_frame_size = div_u64((u64)max_frame_size * 100 * + (window_size + window_buffer), (window_size * 135)); + max_frame_size = div_u64((u64)max_frame_size * 100 * + (1 + window_buffer), 135); + + frame_size = frame_data->filled_len; + window_start = inst->count.etb; + + mutex_lock(&inst->window_data.lock); + list_for_each_entry(pdata, &inst->window_data.list, list) { + if (buf_cnt < window_size && pdata->frame_size) { + frame_size += pdata->frame_size; + window_start = pdata->etb_count; + buf_cnt++; + } else { + pdata->frame_size = 0; + temp = pdata; + } + } + + pdata = NULL; + if(!temp) { + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + s_vpr_e(inst->sid, "%s: malloc failure.\n", __func__); + mutex_unlock(&inst->window_data.lock); + return -ENOMEM; + } + } else { + pdata = temp; + list_del(&pdata->list); + } + pdata->frame_size = frame_data->filled_len; + pdata->etb_count = inst->count.etb; + list_add(&pdata->list, &inst->window_data.list); + mutex_unlock(&inst->window_data.lock); + + frame_size = DIV_ROUND_UP((frame_size * 8), window_size); + if (frame_size > max_avg_frame_size) { + s_vpr_p(inst->sid, + "Unsupported avg frame size %u max %u, window size %u [%u,%u]", + frame_size, max_avg_frame_size, window_size, + window_start, inst->count.etb); + } + if (frame_data->filled_len * 8 > max_frame_size) { + s_vpr_p(inst->sid, + "Unsupported frame size(bit) %u max %u [%u]", + frame_data->filled_len * 8, max_frame_size, + inst->count.etb); + } + + return 0; +} + +void msm_comm_clear_window_data(struct msm_vidc_inst *inst) +{ + struct msm_vidc_window_data *pdata; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + mutex_lock(&inst->window_data.lock); + 
list_for_each_entry(pdata, &inst->window_data.list, list) { + pdata->frame_size = 0; + } + mutex_unlock(&inst->window_data.lock); +} + +void msm_comm_release_window_data(struct msm_vidc_inst *inst) +{ + struct msm_vidc_window_data *pdata, *next; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return; + } + + mutex_lock(&inst->window_data.lock); + list_for_each_entry_safe(pdata, next, &inst->window_data.list, list) { + list_del(&pdata->list); + kfree(pdata); + } + mutex_unlock(&inst->window_data.lock); +} + +void msm_comm_release_timestamps(struct msm_vidc_inst *inst) +{ + struct msm_vidc_timestamps *node, *next; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return; + } + + mutex_lock(&inst->timestamps.lock); + list_for_each_entry_safe(node, next, &inst->timestamps.list, list) { + list_del(&node->list); + kfree(node); + } + INIT_LIST_HEAD(&inst->timestamps.list); + mutex_unlock(&inst->timestamps.lock); +} + +int msm_comm_store_timestamp(struct msm_vidc_inst *inst, s64 timestamp_us, + bool is_eos) +{ + struct msm_vidc_timestamps *entry, *node, *prev = NULL; + struct msm_vidc_timestamps *duplicate; + int count = 0; + int rc = 0; + bool inserted = false; + bool update_next = false; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + mutex_lock(&inst->timestamps.lock); + duplicate = NULL; + list_for_each_entry(node, &inst->timestamps.list, list) { + count++; + if (node->is_valid && node->timestamp_us == timestamp_us && + !node->is_eos) + duplicate = node; + } + + /* Maintain a sliding window of size 64 */ + entry = NULL; + if (count >= VIDEO_MAX_FRAME) { + entry = list_first_entry(&inst->timestamps.list, + struct msm_vidc_timestamps, list); + if (!entry->is_valid) { + list_del_init(&entry->list); + } else { + s_vpr_e(inst->sid, "%s: first entry still valid %d\n", + __func__, count); + entry = NULL; + } + } + if (!entry) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + 
s_vpr_e(inst->sid, "%s: ts malloc failure\n", + __func__); + rc = -ENOMEM; + goto unlock; + } + } + + if (duplicate) { + entry->timestamp_us = duplicate->timestamp_us; + entry->framerate = duplicate->framerate; + entry->is_valid = true; + entry->is_eos = is_eos; + /* add entry next to duplicate */ + list_add(&entry->list, &duplicate->list); + goto unlock; + } + entry->timestamp_us = timestamp_us; + entry->framerate = inst->clk_data.frame_rate; + entry->is_valid = true; + + /* add new entry into the list in sorted order */ + prev = NULL; + inserted = false; + list_for_each_entry(node, &inst->timestamps.list, list) { + if (entry->timestamp_us < node->timestamp_us && + !node->is_eos) { + /* + * if prev available add entry next to prev else + * entry is first so add it at head. + */ + if (prev) + list_add(&entry->list, &prev->list); + else + list_add(&entry->list, &inst->timestamps.list); + inserted = true; + break; + } + prev = node; + } + + if (!inserted) + list_add_tail(&entry->list, &inst->timestamps.list); + + /* update framerate for both entry and entry->next (if any) */ + prev = NULL; + update_next = false; + list_for_each_entry(node, &inst->timestamps.list, list) { + if (update_next) { + node->framerate = msm_comm_calc_framerate(inst, + node->timestamp_us, prev->timestamp_us); + break; + } + if (node->timestamp_us == entry->timestamp_us && + !node->is_eos) { + if (prev) + node->framerate = msm_comm_calc_framerate(inst, + node->timestamp_us, prev->timestamp_us); + update_next = true; + } + prev = node; + } + + /* mark all entries as eos if is_eos is queued */ + if (is_eos) + list_for_each_entry(node, &inst->timestamps.list, list) { + node->is_eos = true; + } + +unlock: + mutex_unlock(&inst->timestamps.lock); + return rc; +} + +u32 msm_comm_calc_framerate(struct msm_vidc_inst *inst, + u64 timestamp_us, u64 prev_ts) +{ + u32 framerate = inst->clk_data.frame_rate; + u32 interval; + struct msm_vidc_capability *capability; + capability = &inst->capability; + + if 
(timestamp_us <= prev_ts) { + s_vpr_e(inst->sid, "%s: invalid ts %lld, prev ts %lld\n", + __func__, timestamp_us, prev_ts); + return framerate; + } + interval = (u32)(timestamp_us - prev_ts); + framerate = (1000000 + interval / 2) / interval; + if (framerate > capability->cap[CAP_FRAMERATE].max) + framerate = capability->cap[CAP_FRAMERATE].max; + if (framerate < 1) + framerate = 1; + return framerate << 16; +} + +u32 msm_comm_get_max_framerate(struct msm_vidc_inst *inst) +{ + struct msm_vidc_timestamps *node; + u64 avg_framerate = 0; + u32 count = 0; + + if (!inst) { + d_vpr_e("%s: invalid parameters\n", __func__); + return (1 << 16); + } + + mutex_lock(&inst->timestamps.lock); + list_for_each_entry(node, &inst->timestamps.list, list) { + count++; + avg_framerate += node->framerate; + } + avg_framerate = count ? (div_u64(avg_framerate, count)) : (1 << 16); + + s_vpr_l(inst->sid, "%s: fps %u, list size %d\n", __func__, avg_framerate, count); + mutex_unlock(&inst->timestamps.lock); + return (u32)avg_framerate; +} + +int msm_comm_fetch_ts_framerate(struct msm_vidc_inst *inst, + struct v4l2_buffer *b) +{ + struct msm_vidc_timestamps *node; + int rc = 0; + bool invalidate_extra = false; + u32 input_tag = 0, input_tag2 = 0; + s32 factor = 1000000; + s32 remainder = 0; + + if (!inst || !b) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + input_tag = b->m.planes[0].reserved[MSM_VIDC_INPUT_TAG_1]; + input_tag2 = b->m.planes[0].reserved[MSM_VIDC_INPUT_TAG_2]; + + /* nothing to do for flushed buffers */ + if (!input_tag) + return 0; + + /* set default framerate */ + b->m.planes[0].reserved[MSM_VIDC_FRAMERATE] = DEFAULT_FPS << 16; + + /* to handle interlace, 2 input buffers and 1 output buffer*/ + if (input_tag2 && input_tag2 != input_tag) + invalidate_extra = true; + + mutex_lock(&inst->timestamps.lock); + list_for_each_entry(node, &inst->timestamps.list, list) { + if (!node->is_valid) + continue; + + if (invalidate_extra) { + node->is_valid = 
false; + invalidate_extra = false; + continue; + } + + /* do not update is_valid flag for subframe buffer */ + if (!(b->flags & V4L2_BUF_FLAG_END_OF_SUBFRAME)) + node->is_valid = false; + + b->timestamp.tv_sec = div_s64_rem(node->timestamp_us, factor, &remainder); + b->timestamp.tv_usec = remainder; + b->m.planes[0].reserved[MSM_VIDC_FRAMERATE] = node->framerate; + break; + } + mutex_unlock(&inst->timestamps.lock); + return rc; +} + +static int msm_comm_memory_regions_prepare(struct msm_vidc_inst *inst) +{ + u32 i = 0; + struct msm_vidc_platform_resources *res; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + res = &inst->core->resources; + + inst->regions.num_regions = res->prefetch_non_pix_buf_count + + res->prefetch_pix_buf_count; + if (inst->regions.num_regions > MEMORY_REGIONS_MAX) { + s_vpr_e(inst->sid, "%s: invalid num_regions: %d, max: %d\n", + __func__, inst->regions.num_regions, + MEMORY_REGIONS_MAX); + return -EINVAL; + } + + s_vpr_h(inst->sid, + "%s: preparing %d nonpixel memory regions of %ld bytes each and %d pixel memory regions of %ld bytes each\n", + __func__, res->prefetch_non_pix_buf_count, + res->prefetch_non_pix_buf_size, res->prefetch_pix_buf_count, + res->prefetch_pix_buf_size); + + for (i = 0; i < res->prefetch_non_pix_buf_count; i++) { + inst->regions.region[i].size = res->prefetch_non_pix_buf_size; + inst->regions.region[i].vmid = ION_FLAG_CP_NON_PIXEL; + } + + for (i = res->prefetch_non_pix_buf_count; + i < inst->regions.num_regions; i++) { + inst->regions.region[i].size = res->prefetch_pix_buf_size; + inst->regions.region[i].vmid = ION_FLAG_CP_PIXEL; + } + + return 0; +} + +int msm_comm_memory_prefetch(struct msm_vidc_inst *inst) +{ + int rc = 0; + + if (!inst || !inst->smem_ops) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + if (inst->memory_ops & MEMORY_PREFETCH) { + s_vpr_h(inst->sid, "%s: prefetch done already\n", __func__); + return 0; + } + + rc = 
msm_comm_memory_regions_prepare(inst); + if (rc) + return rc; + + if (inst->regions.num_regions == 0) + return 0; + + rc = inst->smem_ops->smem_prefetch(inst); + if (rc) + return rc; + + inst->memory_ops |= MEMORY_PREFETCH; + + return rc; +} + +int msm_comm_memory_drain(struct msm_vidc_inst *inst) +{ + int rc = 0; + + if (!inst || !inst->smem_ops) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + if (!(inst->memory_ops & MEMORY_PREFETCH)) + return 0; + + rc = inst->smem_ops->smem_drain(inst); + if (rc) + return rc; + + inst->memory_ops &= ~MEMORY_PREFETCH; + + return rc; +} + +int msm_comm_check_prefetch_sufficient(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct msm_vidc_platform_resources *res; + struct v4l2_plane_pix_format *fmt; + u32 i, internal_buf_sz = 0; + u32 prefetch_npix_sz = 0; + u32 prefetch_pix_sz = 0; + + if (!inst || !inst->core) { + d_vpr_e("%s: invalid parameters\n", __func__); + return -EINVAL; + } + res = &inst->core->resources; + fmt = inst->fmts[OUTPUT_PORT].v4l2_fmt.fmt.pix_mp.plane_fmt; + + if (!is_secure_session(inst) || !is_decode_session(inst)) + return rc; + + if (!(inst->memory_ops & MEMORY_PREFETCH)) + return -ENOMEM; + + prefetch_npix_sz = res->prefetch_non_pix_buf_count * + res->prefetch_non_pix_buf_size; + prefetch_pix_sz = res->prefetch_pix_buf_count * + res->prefetch_pix_buf_size; + + for (i = 0; i < HAL_BUFFER_MAX; i++) { + struct hal_buffer_requirements *req; + + req = &inst->buff_req.buffer[i]; + if (req->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH || + req->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH_1 || + req->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH_2 || + req->buffer_type == HAL_BUFFER_INTERNAL_PERSIST || + req->buffer_type == HAL_BUFFER_INTERNAL_PERSIST_1) + internal_buf_sz += req->buffer_size; + } + + if (prefetch_npix_sz < internal_buf_sz) { + s_vpr_e(inst->sid, + "insufficient non-pix region prefetched %u, required %u", + internal_buf_sz, prefetch_npix_sz); + rc = -ENOMEM; + } + if 
(prefetch_pix_sz < fmt->sizeimage) { + s_vpr_e(inst->sid, + "insufficient pix region prefetched %u, required %u", + fmt->sizeimage, prefetch_pix_sz); + rc = -ENOMEM; + } + + return rc; +} diff --git a/techpack/video/msm/vidc/msm_vidc_common.h b/techpack/video/msm/vidc/msm_vidc_common.h new file mode 100644 index 000000000000..3722afbfc30e --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_common.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _MSM_VIDC_COMMON_H_ +#define _MSM_VIDC_COMMON_H_ +#include "msm_vidc_internal.h" +#include "msm_vidc_debug.h" + +#define MAX_DEC_BATCH_SIZE 6 +#define SKIP_BATCH_WINDOW 100 +#define MIN_FRAME_QUALITY 0 +#define MAX_FRAME_QUALITY 100 +#define DEFAULT_FRAME_QUALITY 95 +#define FRAME_QUALITY_STEP 1 +#define HEIC_GRID_DIMENSION 512 +#define CBR_MB_LIMIT (((1280+15)/16)*((720+15)/16)*30) +#define CBR_VFR_MB_LIMIT (((640+15)/16)*((480+15)/16)*30) +#define MAX_BITRATE_DECODER_CAVLC 220000000 +#define MAX_BITRATE_DECODER_2STAGE_CABAC 200000000 +#define MAX_BITRATE_DECODER_1STAGE_CABAC 70000000 + +struct vb2_buf_entry { + struct list_head list; + struct vb2_buffer *vb; +}; + +struct getprop_buf { + struct list_head list; + void *data; +}; + +enum load_calc_quirks { + LOAD_POWER = 0, + LOAD_ADMISSION_CONTROL = 1, +}; + +enum client_set_controls { + CLIENT_SET_I_QP = 0x1, + CLIENT_SET_P_QP = 0x2, + CLIENT_SET_B_QP = 0x4, + CLIENT_SET_MIN_QP = 0x8, + CLIENT_SET_MAX_QP = 0x10, +}; + +static inline bool is_turbo_session(struct msm_vidc_inst *inst) +{ + return !!(inst->flags & VIDC_TURBO); +} + +static inline bool is_thumbnail_session(struct msm_vidc_inst *inst) +{ + return !!(inst->flags & VIDC_THUMBNAIL); +} + +static inline bool is_low_power_session(struct msm_vidc_inst *inst) +{ + return !!(inst->flags & VIDC_LOW_POWER); +} + +static inline bool is_cvp_supported(struct msm_vidc_inst *inst) +{ + return inst->core && 
!inst->core->resources.no_cvp; +} + +static inline struct v4l2_ctrl *get_ctrl(struct msm_vidc_inst *inst, + u32 id) +{ + int i; + + for (i = 0; i < inst->num_ctrls; i++) { + if (inst->ctrls[i]->id == id) + return inst->ctrls[i]; + } + s_vpr_e(inst->sid, "%s: control id (%#x) not found\n", __func__, id); + MSM_VIDC_ERROR(true); + return inst->ctrls[0]; +} + +static inline void update_ctrl(struct v4l2_ctrl *ctrl, s32 val, u32 sid) +{ + switch (ctrl->type) { + case V4L2_CTRL_TYPE_INTEGER: + *ctrl->p_cur.p_s32 = val; + memcpy(ctrl->p_new.p, ctrl->p_cur.p, + ctrl->elems * ctrl->elem_size); + break; + default: + s_vpr_e(sid, "unhandled control type"); + } +} + +static inline u32 get_v4l2_codec(struct msm_vidc_inst *inst) +{ + struct v4l2_format *f; + u32 port; + + port = (inst->session_type == MSM_VIDC_DECODER) ? INPUT_PORT : + OUTPUT_PORT; + f = &inst->fmts[port].v4l2_fmt; + return f->fmt.pix_mp.pixelformat; +} + +static inline bool is_image_session(struct msm_vidc_inst *inst) +{ + /* Grid may or may not be enabled for an image encode session */ + return inst->session_type == MSM_VIDC_ENCODER && + get_v4l2_codec(inst) == V4L2_PIX_FMT_HEVC && + inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ; +} + +static inline bool is_grid_session(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl = NULL; + if (inst->session_type == MSM_VIDC_ENCODER && + get_v4l2_codec(inst) == V4L2_PIX_FMT_HEVC) { + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_IMG_GRID_SIZE); + return (ctrl->val > 0); + } + return 0; +} + +static inline bool is_heif_decoder(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl = NULL; + if (inst->session_type == MSM_VIDC_DECODER && + get_v4l2_codec(inst) == V4L2_PIX_FMT_HEVC) { + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VDEC_HEIF_MODE); + return (ctrl->val > 0); + } + return 0; +} + +static inline bool is_video_session(struct msm_vidc_inst *inst) +{ + return !is_grid_session(inst); +} +static inline bool is_realtime_session(struct msm_vidc_inst *inst) +{ + struct 
v4l2_ctrl *ctrl; + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY); + return !!ctrl->val; +} + +static inline bool is_low_latency_hint(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl; + + if (inst->session_type != MSM_VIDC_DECODER) + return false; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_HINT); + return !!ctrl->val; +} + +static inline bool is_secure_session(struct msm_vidc_inst *inst) +{ + return !!(inst->flags & VIDC_SECURE); +} + +static inline bool is_ts_reorder_allowed(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl; + + if (is_secure_session(inst)) + return false; + + if (inst->session_type != MSM_VIDC_DECODER) + return true; + + if (is_heif_decoder(inst)) + return false; + + ctrl = get_ctrl(inst, + V4L2_CID_MPEG_VIDC_VIDEO_DISABLE_TIMESTAMP_REORDER); + return !ctrl->val; +} + +static inline bool is_decode_session(struct msm_vidc_inst *inst) +{ + return inst->session_type == MSM_VIDC_DECODER; +} + +static inline bool is_encode_session(struct msm_vidc_inst *inst) +{ + return inst->session_type == MSM_VIDC_ENCODER; +} + +static inline bool is_encode_batching(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl; + + if (inst->session_type != MSM_VIDC_ENCODER) + return false; + + ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_SUPERFRAME); + return !!ctrl->val; +} + +static inline bool is_primary_output_mode(struct msm_vidc_inst *inst) +{ + return inst->stream_output_mode == HAL_VIDEO_DECODER_PRIMARY; +} + +static inline bool is_secondary_output_mode(struct msm_vidc_inst *inst) +{ + return inst->stream_output_mode == HAL_VIDEO_DECODER_SECONDARY; +} + +static inline bool in_port_reconfig(struct msm_vidc_inst *inst) +{ + return inst->in_reconfig && inst->bufq[INPUT_PORT].vb2_bufq.streaming; +} + +static inline bool is_input_buffer(struct msm_vidc_buffer *mbuf) +{ + return mbuf->vvb.vb2_buf.type == INPUT_MPLANE; +} + +static inline bool is_output_buffer(struct msm_vidc_buffer *mbuf) +{ + return mbuf->vvb.vb2_buf.type == 
OUTPUT_MPLANE; +} + +static inline bool is_internal_buffer(enum hal_buffer type) +{ + u32 buf_type = + HAL_BUFFER_INTERNAL_SCRATCH | + HAL_BUFFER_INTERNAL_SCRATCH_1 | + HAL_BUFFER_INTERNAL_SCRATCH_2 | + HAL_BUFFER_INTERNAL_PERSIST | + HAL_BUFFER_INTERNAL_PERSIST_1 | + HAL_BUFFER_INTERNAL_RECON; + return !!(buf_type & type); +} + +static inline bool is_hier_b_session(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *max_layer = NULL; + struct v4l2_ctrl *frame_t = NULL; + + if (inst->session_type == MSM_VIDC_ENCODER) { + max_layer = get_ctrl(inst, + V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER); + frame_t = get_ctrl(inst, + V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE); + if (get_v4l2_codec(inst) == V4L2_PIX_FMT_HEVC && + max_layer->val > 1 && + frame_t->val == + V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B) + return true; + } + return false; +} + +static inline int msm_comm_g_ctrl(struct msm_vidc_inst *inst, + struct v4l2_control *ctrl) +{ + return v4l2_g_ctrl(&inst->ctrl_handler, ctrl); +} + +static inline int msm_comm_s_ctrl(struct msm_vidc_inst *inst, + struct v4l2_control *ctrl) +{ + return v4l2_s_ctrl(NULL, &inst->ctrl_handler, ctrl); +} + +bool vidc_scalar_enabled(struct msm_vidc_inst *inst); +bool is_single_session(struct msm_vidc_inst *inst, u32 ignore_flags); +bool is_batching_allowed(struct msm_vidc_inst *inst); +enum hal_buffer get_hal_buffer_type(unsigned int type, + unsigned int plane_num); +void put_inst(struct msm_vidc_inst *inst); +struct msm_vidc_inst *get_inst(struct msm_vidc_core *core, + void *inst_id); +void change_inst_state(struct msm_vidc_inst *inst, enum instance_state state); +struct msm_vidc_core *get_vidc_core(int core_id); +const struct msm_vidc_format_desc *msm_comm_get_pixel_fmt_index( + const struct msm_vidc_format_desc fmt[], int size, int index, u32 sid); +struct msm_vidc_format_desc *msm_comm_get_pixel_fmt_fourcc( + struct msm_vidc_format_desc fmt[], int size, int fourcc, u32 sid); +struct msm_vidc_format_constraint 
*msm_comm_get_pixel_fmt_constraints( + struct msm_vidc_format_constraint fmt[], int size, int fourcc, u32 sid); +int msm_comm_set_color_format_constraints(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, + struct msm_vidc_format_constraint *pix_constraint); +struct buf_queue *msm_comm_get_vb2q( + struct msm_vidc_inst *inst, enum v4l2_buf_type type); +int msm_comm_try_state(struct msm_vidc_inst *inst, int state); +int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst); +int msm_comm_try_get_buff_req(struct msm_vidc_inst *inst, + union hal_get_property *hprop); +int msm_comm_set_recon_buffers(struct msm_vidc_inst *inst); +int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst); +int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst); +int msm_comm_set_buffer_count(struct msm_vidc_inst *inst, + int host_count, int act_count, enum hal_buffer type); +int msm_comm_set_dpb_only_buffers(struct msm_vidc_inst *inst); +int msm_comm_queue_dpb_only_buffers(struct msm_vidc_inst *inst); +int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); +int msm_comm_qbufs(struct msm_vidc_inst *inst); +void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst); +int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags); +int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst, + bool check_for_reuse); +int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst); +int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst); +void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst); +int msm_comm_release_dpb_only_buffers(struct msm_vidc_inst *inst, + bool force_release); +void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst); +int msm_comm_force_cleanup(struct msm_vidc_inst *inst); +int msm_comm_suspend(int core_id); +int msm_comm_reset_bufreqs(struct msm_vidc_inst *inst, + enum hal_buffer buf_type); +struct hal_buffer_requirements *get_buff_req_buffer( + struct msm_vidc_inst *inst, u32 buffer_type); +#define 
IS_PRIV_CTRL(idx) (\ + (V4L2_CTRL_ID2WHICH(idx) == V4L2_CTRL_CLASS_MPEG) && \ + V4L2_CTRL_DRIVER_PRIV(idx)) +void msm_comm_session_clean(struct msm_vidc_inst *inst); +int msm_comm_kill_session(struct msm_vidc_inst *inst); +void msm_comm_generate_session_error(struct msm_vidc_inst *inst); +void msm_comm_generate_sys_error(struct msm_vidc_inst *inst); +enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst); +int msm_comm_set_stream_output_mode(struct msm_vidc_inst *inst, + enum multi_stream mode); +enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst); +int msm_comm_smem_alloc(struct msm_vidc_inst *inst, size_t size, u32 align, + u32 flags, enum hal_buffer buffer_type, int map_kernel, + struct msm_smem *smem); +void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *smem); +int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst, + struct msm_smem *mem, enum smem_cache_ops cache_ops); +enum hal_video_codec get_hal_codec(int fourcc, u32 sid); +enum hal_domain get_hal_domain(int session_type, u32 sid); +int msm_comm_check_core_init(struct msm_vidc_core *core, u32 sid); +int msm_comm_get_inst_load(struct msm_vidc_inst *inst, + enum load_calc_quirks quirks); +int msm_comm_get_inst_load_per_core(struct msm_vidc_inst *inst, + enum load_calc_quirks quirks); +int msm_comm_get_device_load(struct msm_vidc_core *core, + enum session_type sess_type, + enum load_type load_type, + enum load_calc_quirks quirks); +int msm_comm_set_color_format(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, int fourcc); +int msm_comm_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl); +int msm_comm_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl); +int msm_comm_g_ctrl_for_id(struct msm_vidc_inst *inst, int id); +int msm_comm_ctrl_init(struct msm_vidc_inst *inst, + struct msm_vidc_ctrl *drv_ctrls, u32 num_ctrls, + const struct v4l2_ctrl_ops *ctrl_ops); +int msm_comm_ctrl_deinit(struct msm_vidc_inst 
*inst); +void msm_comm_cleanup_internal_buffers(struct msm_vidc_inst *inst); +bool msm_comm_turbo_session(struct msm_vidc_inst *inst); +void msm_comm_print_inst_info(struct msm_vidc_inst *inst); +void msm_comm_print_insts_info(struct msm_vidc_core *core); +int msm_comm_v4l2_to_hfi(int id, int value, u32 sid); +int msm_comm_hfi_to_v4l2(int id, int value, u32 sid); +int msm_comm_get_v4l2_profile(int fourcc, int profile, u32 sid); +int msm_comm_get_v4l2_level(int fourcc, int level, u32 sid); +int msm_comm_session_continue(void *instance); +int msm_vidc_send_pending_eos_buffers(struct msm_vidc_inst *inst); +u32 msm_comm_get_hfi_uncompressed(int fourcc, u32 sid); +u32 msm_comm_convert_color_fmt(u32 v4l2_fmt, u32 sid); +struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( + struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); +struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes( + struct msm_vidc_inst *inst, u32 type, u32 *planes); +struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, + struct vb2_buffer *vb2); +void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +void handle_release_buffer_reference(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i); +bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, unsigned long *dma_planes); +bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i); +bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer 
*mbuf, struct vb2_buffer *vb2); +bool msm_comm_compare_device_plane(u32 sid, struct msm_vidc_buffer *mbuf, + u32 type, u32 *planes, u32 i); +bool msm_comm_compare_device_planes(u32 sid, struct msm_vidc_buffer *mbuf, + u32 type, u32 *planes); +int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst, + struct vb2_buffer *vb2); +void kref_put_mbuf(struct msm_vidc_buffer *mbuf); +bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); +int msm_comm_store_input_tag(struct msm_vidc_list *data_list, + u32 index, u32 itag, u32 itag2, u32 sid); +int msm_comm_fetch_input_tag(struct msm_vidc_list *data_list, + u32 index, u32 *itag, u32 *itag2, u32 sid); +int msm_comm_release_input_tag(struct msm_vidc_inst *inst); +int msm_comm_qbufs_batch(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +int msm_comm_qbuf_decode_batch(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +int schedule_batch_work(struct msm_vidc_inst *inst); +int cancel_batch_work(struct msm_vidc_inst *inst); +int msm_comm_num_queued_bufs(struct msm_vidc_inst *inst, u32 type); +int msm_comm_set_index_extradata(struct msm_vidc_inst *inst, + uint32_t extradata_id, uint32_t value); +int msm_comm_set_extradata(struct msm_vidc_inst *inst, uint32_t extradata_id, + uint32_t value); +bool msm_comm_check_for_inst_overload(struct msm_vidc_core *core); +void msm_vidc_batch_handler(struct work_struct *work); +int msm_comm_check_window_bitrate(struct msm_vidc_inst *inst, + struct vidc_frame_data *frame_data); +void msm_comm_clear_window_data(struct msm_vidc_inst *inst); +void msm_comm_release_window_data(struct msm_vidc_inst *inst); +int msm_comm_set_cvp_skip_ratio(struct 
msm_vidc_inst *inst, + uint32_t capture_rate, uint32_t cvp_rate); +int msm_comm_fetch_ts_framerate(struct msm_vidc_inst *inst, + struct v4l2_buffer *b); +int msm_comm_store_timestamp(struct msm_vidc_inst *inst, s64 timestamp_us, + bool is_eos); +void msm_comm_release_timestamps(struct msm_vidc_inst *inst); +u32 msm_comm_get_max_framerate(struct msm_vidc_inst *inst); +u32 msm_comm_calc_framerate(struct msm_vidc_inst *inst, u64 timestamp_us, + u64 prev_ts); +int msm_comm_memory_prefetch(struct msm_vidc_inst *inst); +int msm_comm_memory_drain(struct msm_vidc_inst *inst); +int msm_comm_check_prefetch_sufficient(struct msm_vidc_inst *inst); +int msm_comm_check_memory_supported(struct msm_vidc_inst *vidc_inst); +int msm_comm_update_dpb_bufreqs(struct msm_vidc_inst *inst); +#endif diff --git a/techpack/video/msm/vidc/msm_vidc_debug.c b/techpack/video/msm/vidc/msm_vidc_debug.c new file mode 100644 index 000000000000..263840a68581 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_debug.c @@ -0,0 +1,753 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#define CREATE_TRACE_POINTS +#define MAX_SSR_STRING_LEN 64 +#define MAX_DEBUG_LEVEL_STRING_LEN 15 +#include "msm_vidc_debug.h" +#include "vidc_hfi_api.h" + +int msm_vidc_debug = VIDC_ERR | VIDC_PRINTK | + FW_ERROR | FW_FATAL | FW_FTRACE; +EXPORT_SYMBOL(msm_vidc_debug); + +bool msm_vidc_lossless_encode = !true; +EXPORT_SYMBOL(msm_vidc_lossless_encode); + +int msm_vidc_fw_debug_mode = HFI_DEBUG_MODE_QUEUE; +bool msm_vidc_fw_coverage = !true; +bool msm_vidc_thermal_mitigation_disabled = !true; +int msm_vidc_clock_voting = !1; +bool msm_vidc_syscache_disable = !true; +bool msm_vidc_cvp_usage = true; +int msm_vidc_err_recovery_disable = !1; +int msm_vidc_vpp_delay; + +#define MAX_DBG_BUF_SIZE 4096 + +#define DYNAMIC_BUF_OWNER(__binfo) ({ \ + atomic_read(&__binfo->ref_count) >= 2 ? 
"video driver" : "firmware";\ +}) + +struct core_inst_pair { + struct msm_vidc_core *core; + struct msm_vidc_inst *inst; +}; + +static u32 write_str(char *buffer, + size_t size, const char *fmt, ...) +{ + va_list args; + u32 len; + + va_start(args, fmt); + len = vscnprintf(buffer, size, fmt, args); + va_end(args); + return len; +} + +static ssize_t core_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct msm_vidc_core *core = file->private_data; + struct hfi_device *hdev; + struct hal_fw_info fw_info = { {0} }; + char *dbuf, *cur, *end; + int i = 0, rc = 0; + ssize_t len = 0; + + if (!core || !core->device) { + d_vpr_e("%s: invalid params %pK\n", __func__, core); + return 0; + } + + dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL); + if (!dbuf) { + d_vpr_e("%s: Allocation failed!\n", __func__); + return -ENOMEM; + } + cur = dbuf; + end = cur + MAX_DBG_BUF_SIZE; + hdev = core->device; + + cur += write_str(cur, end - cur, "===============================\n"); + cur += write_str(cur, end - cur, "CORE %d: %pK\n", core->id, core); + cur += write_str(cur, end - cur, "===============================\n"); + cur += write_str(cur, end - cur, "Core state: %d\n", core->state); + rc = call_hfi_op(hdev, get_fw_info, hdev->hfi_device_data, &fw_info); + if (rc) { + d_vpr_e("Failed to read FW info\n"); + goto err_fw_info; + } + + cur += write_str(cur, end - cur, + "FW version : %s\n", &fw_info.version); + cur += write_str(cur, end - cur, + "base addr: 0x%x\n", fw_info.base_addr); + cur += write_str(cur, end - cur, + "register_base: 0x%x\n", fw_info.register_base); + cur += write_str(cur, end - cur, + "register_size: %u\n", fw_info.register_size); + cur += write_str(cur, end - cur, "irq: %u\n", fw_info.irq); + cur += write_str(cur, end - cur, + "ddr_type: %d\n", of_fdt_get_ddrtype()); + +err_fw_info: + for (i = SYS_MSG_START; i < SYS_MSG_END; i++) { + cur += write_str(cur, end - cur, "completions[%d]: %s\n", i, + 
completion_done(&core->completions[SYS_MSG_INDEX(i)]) ? + "pending" : "done"); + } + len = simple_read_from_buffer(buf, count, ppos, + dbuf, cur - dbuf); + + kfree(dbuf); + return len; +} + +static const struct file_operations core_info_fops = { + .open = simple_open, + .read = core_info_read, +}; + +static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long ssr_trigger_val = 0; + int rc = 0; + struct msm_vidc_core *core = filp->private_data; + size_t size = MAX_SSR_STRING_LEN; + char kbuf[MAX_SSR_STRING_LEN + 1] = {0}; + + if (!buf) + return -EINVAL; + + if (!count) + goto exit; + + if (count < size) + size = count; + + if (copy_from_user(kbuf, buf, size)) { + d_vpr_e("%s: User memory fault\n", __func__); + rc = -EFAULT; + goto exit; + } + + rc = kstrtoul(kbuf, 0, &ssr_trigger_val); + if (rc) { + d_vpr_e("returning error err %d\n", rc); + rc = -EINVAL; + } else { + msm_vidc_trigger_ssr(core, ssr_trigger_val); + rc = count; + } +exit: + return rc; +} + +static const struct file_operations ssr_fops = { + .open = simple_open, + .write = trigger_ssr_write, +}; + +static ssize_t debug_level_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + int rc = 0; + struct msm_vidc_core *core = filp->private_data; + char kbuf[MAX_DEBUG_LEVEL_STRING_LEN] = {0}; + + /* filter partial writes and invalid commands */ + if (*ppos != 0 || count >= sizeof(kbuf) || count == 0) { + d_vpr_e("returning error - pos %d, count %d\n", *ppos, count); + rc = -EINVAL; + } + + rc = simple_write_to_buffer(kbuf, sizeof(kbuf) - 1, ppos, buf, count); + if (rc < 0) { + d_vpr_e("%s: User memory fault\n", __func__); + rc = -EFAULT; + goto exit; + } + + rc = kstrtoint(kbuf, 0, &msm_vidc_debug); + if (rc) { + d_vpr_e("returning error err %d\n", rc); + rc = -EINVAL; + goto exit; + } + core->resources.msm_vidc_hw_rsp_timeout = + ((msm_vidc_debug & 0xFF) > (VIDC_ERR | VIDC_HIGH)) ? 
1500 : 1000; + rc = count; + d_vpr_h("debug timeout updated to - %d\n", + core->resources.msm_vidc_hw_rsp_timeout); + +exit: + return rc; +} + +static ssize_t debug_level_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + size_t len; + char kbuf[MAX_DEBUG_LEVEL_STRING_LEN]; + + len = scnprintf(kbuf, sizeof(kbuf), "0x%08x\n", msm_vidc_debug); + return simple_read_from_buffer(buf, count, ppos, kbuf, len); +} + +static const struct file_operations debug_level_fops = { + .open = simple_open, + .write = debug_level_write, + .read = debug_level_read, +}; + +struct dentry *msm_vidc_debugfs_init_drv(void) +{ + bool ok = false; + struct dentry *dir = NULL; + + msm_vidc_vpp_delay = 0; + + dir = debugfs_create_dir("msm_vidc", NULL); + if (IS_ERR_OR_NULL(dir)) { + dir = NULL; + goto failed_create_dir; + } + +#define __debugfs_create(__type, __name, __value) ({ \ + struct dentry *f = debugfs_create_##__type(__name, 0644, \ + dir, __value); \ + if (IS_ERR_OR_NULL(f)) { \ + d_vpr_e("Failed creating debugfs file '%pd/%s'\n", \ + dir, __name); \ + f = NULL; \ + } \ + f; \ +}) + + ok = + __debugfs_create(u32, "fw_debug_mode", &msm_vidc_fw_debug_mode) && + __debugfs_create(bool, "fw_coverage", &msm_vidc_fw_coverage) && + __debugfs_create(bool, "disable_thermal_mitigation", + &msm_vidc_thermal_mitigation_disabled) && + __debugfs_create(u32, "core_clock_voting", + &msm_vidc_clock_voting) && + __debugfs_create(bool, "disable_video_syscache", + &msm_vidc_syscache_disable) && + __debugfs_create(bool, "cvp_usage", &msm_vidc_cvp_usage) && + __debugfs_create(bool, "lossless_encoding", + &msm_vidc_lossless_encode) && + __debugfs_create(u32, "disable_err_recovery", + &msm_vidc_err_recovery_disable) && + __debugfs_create(u32, "vpp_delay", &msm_vidc_vpp_delay); + +#undef __debugfs_create + + if (!ok) + goto failed_create_dir; + + return dir; + +failed_create_dir: + if (dir) + debugfs_remove_recursive(vidc_driver->debugfs_root); + + return NULL; +} + +struct dentry 
*msm_vidc_debugfs_init_core(struct msm_vidc_core *core, + struct dentry *parent) +{ + struct dentry *dir = NULL; + char debugfs_name[MAX_DEBUGFS_NAME]; + + if (!core) { + d_vpr_e("%s: invalid params\n", __func__); + goto failed_create_dir; + } + + snprintf(debugfs_name, MAX_DEBUGFS_NAME, "core%d", core->id); + dir = debugfs_create_dir(debugfs_name, parent); + if (IS_ERR_OR_NULL(dir)) { + dir = NULL; + d_vpr_e("Failed to create debugfs for msm_vidc\n"); + goto failed_create_dir; + } + if (!debugfs_create_file("info", 0444, dir, core, &core_info_fops)) { + d_vpr_e("debugfs_create_file: fail\n"); + goto failed_create_dir; + } + if (!debugfs_create_file("trigger_ssr", 0200, + dir, core, &ssr_fops)) { + d_vpr_e("debugfs_create_file: fail\n"); + goto failed_create_dir; + } + if (!debugfs_create_file("debug_level", 0644, + parent, core, &debug_level_fops)) { + d_vpr_e("debugfs_create_file: fail\n"); + goto failed_create_dir; + } +failed_create_dir: + return dir; +} + +static int inst_info_open(struct inode *inode, struct file *file) +{ + d_vpr_l("Open inode ptr: %pK\n", inode->i_private); + file->private_data = inode->i_private; + return 0; +} + +static int publish_unreleased_reference(struct msm_vidc_inst *inst, + char **dbuf, char *end) +{ + struct msm_vidc_buffer *temp = NULL; + char *cur = *dbuf; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + return -EINVAL; + } + + if (inst->buffer_mode_set[OUTPUT_PORT] == HAL_BUFFER_MODE_DYNAMIC) { + cur += write_str(cur, end - cur, "Pending buffer references\n"); + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(temp, &inst->registeredbufs.list, list) { + struct vb2_buffer *vb2 = &temp->vvb.vb2_buf; + + if (vb2->type == OUTPUT_MPLANE) { + cur += write_str(cur, end - cur, + "\tbuffer: %#x fd[0] = %d size %d refcount = %d\n", + temp->smem[0].device_addr, + vb2->planes[0].m.fd, + vb2->planes[0].length, + temp->smem[0].refcount); + } + } + mutex_unlock(&inst->registeredbufs.lock); + } + + *dbuf = cur; + 
return 0; +} + +static void put_inst_helper(struct kref *kref) +{ + struct msm_vidc_inst *inst = container_of(kref, + struct msm_vidc_inst, kref); + + msm_vidc_destroy(inst); +} + +static ssize_t inst_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct core_inst_pair *idata = file->private_data; + struct msm_vidc_core *core; + struct msm_vidc_inst *inst, *temp = NULL; + char *dbuf, *cur, *end; + int i, j; + ssize_t len = 0; + struct v4l2_format *f; + + if (!idata || !idata->core || !idata->inst) { + d_vpr_e("%s: invalid params %pK\n", __func__, idata); + return 0; + } + + core = idata->core; + inst = idata->inst; + + mutex_lock(&core->lock); + list_for_each_entry(temp, &core->instances, list) { + if (temp == inst) + break; + } + inst = ((temp == inst) && kref_get_unless_zero(&inst->kref)) ? + inst : NULL; + mutex_unlock(&core->lock); + + if (!inst) { + d_vpr_e("%s: Instance has become obsolete", __func__); + return 0; + } + + dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL); + if (!dbuf) { + s_vpr_e(inst->sid, "%s: Allocation failed!\n", __func__); + len = -ENOMEM; + goto failed_alloc; + } + cur = dbuf; + end = cur + MAX_DBG_BUF_SIZE; + + f = &inst->fmts[OUTPUT_PORT].v4l2_fmt; + cur += write_str(cur, end - cur, "==============================\n"); + cur += write_str(cur, end - cur, "INSTANCE: %pK (%s)\n", inst, + inst->session_type == MSM_VIDC_ENCODER ? 
"Encoder" : "Decoder"); + cur += write_str(cur, end - cur, "==============================\n"); + cur += write_str(cur, end - cur, "core: %pK\n", inst->core); + cur += write_str(cur, end - cur, "height: %d\n", f->fmt.pix_mp.height); + cur += write_str(cur, end - cur, "width: %d\n", f->fmt.pix_mp.width); + cur += write_str(cur, end - cur, "fps: %d\n", + inst->clk_data.frame_rate >> 16); + cur += write_str(cur, end - cur, "state: %d\n", inst->state); + cur += write_str(cur, end - cur, "secure: %d\n", + !!(inst->flags & VIDC_SECURE)); + cur += write_str(cur, end - cur, "-----------Formats-------------\n"); + for (i = 0; i < MAX_PORT_NUM; i++) { + f = &inst->fmts[i].v4l2_fmt; + cur += write_str(cur, end - cur, "capability: %s\n", + i == INPUT_PORT ? "Output" : "Capture"); + cur += write_str(cur, end - cur, "name : %s\n", + inst->fmts[i].name); + cur += write_str(cur, end - cur, "planes : %d\n", + f->fmt.pix_mp.num_planes); + cur += write_str(cur, end - cur, + "type: %s\n", i == INPUT_PORT ? + "Output" : "Capture"); + switch (inst->buffer_mode_set[i]) { + case HAL_BUFFER_MODE_STATIC: + cur += write_str(cur, end - cur, + "buffer mode : %s\n", "static"); + break; + case HAL_BUFFER_MODE_DYNAMIC: + cur += write_str(cur, end - cur, + "buffer mode : %s\n", "dynamic"); + break; + default: + cur += write_str(cur, end - cur, + "buffer mode : unsupported\n"); + } + + cur += write_str(cur, end - cur, "count: %u\n", + inst->bufq[i].vb2_bufq.num_buffers); + + for (j = 0; j < f->fmt.pix_mp.num_planes; j++) + cur += write_str(cur, end - cur, + "size for plane %d: %u\n", + j, f->fmt.pix_mp.plane_fmt[j].sizeimage); + + if (i < MAX_PORT_NUM - 1) + cur += write_str(cur, end - cur, "\n"); + } + cur += write_str(cur, end - cur, "-------------------------------\n"); + for (i = SESSION_MSG_START; i < SESSION_MSG_END; i++) { + cur += write_str(cur, end - cur, "completions[%d]: %s\n", i, + completion_done(&inst->completions[SESSION_MSG_INDEX(i)]) ? 
+ "pending" : "done"); + } + cur += write_str(cur, end - cur, "ETB Count: %d\n", inst->count.etb); + cur += write_str(cur, end - cur, "EBD Count: %d\n", inst->count.ebd); + cur += write_str(cur, end - cur, "FTB Count: %d\n", inst->count.ftb); + cur += write_str(cur, end - cur, "FBD Count: %d\n", inst->count.fbd); + + publish_unreleased_reference(inst, &cur, end); + len = simple_read_from_buffer(buf, count, ppos, + dbuf, cur - dbuf); + + kfree(dbuf); +failed_alloc: + kref_put(&inst->kref, put_inst_helper); + return len; +} + +static int inst_info_release(struct inode *inode, struct file *file) +{ + d_vpr_l("Release inode ptr: %pK\n", inode->i_private); + file->private_data = NULL; + return 0; +} + +static const struct file_operations inst_info_fops = { + .open = inst_info_open, + .read = inst_info_read, + .release = inst_info_release, +}; + +struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst, + struct dentry *parent) +{ + struct dentry *dir = NULL, *info = NULL; + char debugfs_name[MAX_DEBUGFS_NAME]; + struct core_inst_pair *idata = NULL; + + if (!inst) { + d_vpr_e("%s: invalid params\n", __func__); + goto exit; + } + snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%d", inst->sid); + + idata = kzalloc(sizeof(struct core_inst_pair), GFP_KERNEL); + if (!idata) { + s_vpr_e(inst->sid, "%s: Allocation failed!\n", __func__); + goto exit; + } + + idata->core = inst->core; + idata->inst = inst; + + dir = debugfs_create_dir(debugfs_name, parent); + if (IS_ERR_OR_NULL(dir)) { + dir = NULL; + s_vpr_e(inst->sid, "Failed to create debugfs for msm_vidc\n"); + goto failed_create_dir; + } + + info = debugfs_create_file("info", 0444, dir, + idata, &inst_info_fops); + if (IS_ERR_OR_NULL(info)) { + s_vpr_e(inst->sid, "debugfs_create_file: fail\n"); + goto failed_create_file; + } + + dir->d_inode->i_private = info->d_inode->i_private; + inst->debug.pdata[FRAME_PROCESSING].sampling = true; + return dir; + +failed_create_file: + debugfs_remove_recursive(dir); + dir = 
NULL; +failed_create_dir: + kfree(idata); +exit: + return dir; +} + +void msm_vidc_debugfs_deinit_inst(struct msm_vidc_inst *inst) +{ + struct dentry *dentry = NULL; + + if (!inst || !inst->debugfs_root) + return; + + dentry = inst->debugfs_root; + if (dentry->d_inode) { + s_vpr_l(inst->sid, "Destroy %pK\n", dentry->d_inode->i_private); + kfree(dentry->d_inode->i_private); + dentry->d_inode->i_private = NULL; + } + debugfs_remove_recursive(dentry); + inst->debugfs_root = NULL; +} + +void msm_vidc_debugfs_update(struct msm_vidc_inst *inst, + enum msm_vidc_debugfs_event e) +{ + struct msm_vidc_debug *d = &inst->debug; + char a[64] = "Frame processing"; + + switch (e) { + case MSM_VIDC_DEBUGFS_EVENT_ETB: + inst->count.etb++; + trace_msm_v4l2_vidc_buffer_counter("ETB", + inst->count.etb, inst->count.ebd, + inst->count.ftb, inst->count.fbd); + if (inst->count.ebd && inst->count.ftb > inst->count.fbd) { + d->pdata[FRAME_PROCESSING].name[0] = '\0'; + tic(inst, FRAME_PROCESSING, a); + } + break; + case MSM_VIDC_DEBUGFS_EVENT_EBD: + inst->count.ebd++; + trace_msm_v4l2_vidc_buffer_counter("EBD", + inst->count.etb, inst->count.ebd, + inst->count.ftb, inst->count.fbd); + if (inst->count.ebd && inst->count.ebd == inst->count.etb) { + toc(inst, FRAME_PROCESSING); + s_vpr_p(inst->sid, "EBD: FW needs input buffers\n"); + } + if (inst->count.ftb == inst->count.fbd) + s_vpr_p(inst->sid, "EBD: FW needs output buffers\n"); + break; + case MSM_VIDC_DEBUGFS_EVENT_FTB: { + inst->count.ftb++; + trace_msm_v4l2_vidc_buffer_counter("FTB", + inst->count.etb, inst->count.ebd, + inst->count.ftb, inst->count.fbd); + if (inst->count.ebd && inst->count.etb > inst->count.ebd) { + d->pdata[FRAME_PROCESSING].name[0] = '\0'; + tic(inst, FRAME_PROCESSING, a); + } + } + break; + case MSM_VIDC_DEBUGFS_EVENT_FBD: + inst->count.fbd++; + inst->debug.samples++; + trace_msm_v4l2_vidc_buffer_counter("FBD", + inst->count.etb, inst->count.ebd, + inst->count.ftb, inst->count.fbd); + if (inst->count.fbd && + 
inst->count.fbd == inst->count.ftb) { + toc(inst, FRAME_PROCESSING); + s_vpr_p(inst->sid, "FBD: FW needs output buffers\n"); + } + if (inst->count.etb == inst->count.ebd) + s_vpr_p(inst->sid, "FBD: FW needs input buffers\n"); + break; + default: + s_vpr_e(inst->sid, "Invalid state in debugfs: %d\n", e); + break; + } +} + +int msm_vidc_check_ratelimit(void) +{ + static DEFINE_RATELIMIT_STATE(_rs, + VIDC_DBG_SESSION_RATELIMIT_INTERVAL, + VIDC_DBG_SESSION_RATELIMIT_BURST); + return __ratelimit(&_rs); +} + +/** + * get_sid() must be called under "&core->lock" + * to avoid race condition at occupying empty slot. + */ +int get_sid(u32 *sid, u32 session_type) +{ + int i; + + for (i = 0; i < vidc_driver->num_ctxt; i++) { + if (!vidc_driver->ctxt[i].used) { + vidc_driver->ctxt[i].used = 1; + *sid = i+1; + update_log_ctxt(*sid, session_type, 0); + break; + } + } + + return (i == vidc_driver->num_ctxt); +} + +inline void update_log_ctxt(u32 sid, u32 session_type, u32 fourcc) +{ + const char *codec; + char type; + u32 s_type = 0; + + if (!sid || sid > vidc_driver->num_ctxt) { + d_vpr_e("%s: invalid sid %#x\n", + __func__, sid); + } + + switch (fourcc) { + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_H264_NO_SC: + codec = "h264"; + break; + case V4L2_PIX_FMT_H264_MVC: + codec = " mvc"; + break; + case V4L2_PIX_FMT_MPEG1: + codec = "mpg1"; + break; + case V4L2_PIX_FMT_MPEG2: + codec = "mpg2"; + break; + case V4L2_PIX_FMT_VP8: + codec = " vp8"; + break; + case V4L2_PIX_FMT_VP9: + codec = " vp9"; + break; + case V4L2_PIX_FMT_HEVC: + codec = "h265"; + break; + default: + codec = "...."; + break; + } + + switch (session_type) { + case MSM_VIDC_ENCODER: + type = 'e'; + s_type = VIDC_ENCODER; + break; + case MSM_VIDC_DECODER: + type = 'd'; + s_type = VIDC_DECODER; + break; + default: + type = '.'; + break; + } + + vidc_driver->ctxt[sid-1].session_type = s_type; + vidc_driver->ctxt[sid-1].codec_type = fourcc; + memcpy(&vidc_driver->ctxt[sid-1].name, codec, 4); + 
vidc_driver->ctxt[sid-1].name[4] = type; + vidc_driver->ctxt[sid-1].name[5] = '\0'; +} + +/* Mock all the missing parts for successful compilation starts here */ +void do_gettimeofday(struct timeval *__ddl_tv) +{ +} + +#ifndef CONFIG_VIDEOBUF2_CORE +void vb2_queue_release(struct vb2_queue *q) +{ + (void) q; +} + +int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) +{ + (void) q; + (void) req; + + return 0; +} + +int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b) +{ + (void) q; + (void) mdev; + (void) b; + + return 0; +} + +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) +{ + (void) q; + (void) b; + (void) nonblocking; + + return 0; +} + +int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) +{ + (void) q; + (void) type; + + return 0; +} + +int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) +{ + (void) q; + (void) type; + + return 0; +} + +int vb2_queue_init(struct vb2_queue *q) +{ + (void) q; + + return 0; +} + +void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) +{ + (void) vb; + (void) state; +} +#endif diff --git a/techpack/video/msm/vidc/msm_vidc_debug.h b/techpack/video/msm/vidc/msm_vidc_debug.h new file mode 100644 index 000000000000..4c10d93863a4 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_debug.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __MSM_VIDC_DEBUG__ +#define __MSM_VIDC_DEBUG__ +#include +#include +#include "msm_vidc_events.h" + +/* Mock all the missing parts for successful compilation starts here */ +#include +#include +#include +#include +#include "msm_vidc_internal.h" + +// void disable_irq_nosync(unsigned int irq); +// void enable_irq(unsigned int irq); + +void do_gettimeofday(struct timeval *__ddl_tv); + +#ifndef CONFIG_VIDEOBUF2_CORE +int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); +int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b); +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); +int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); +int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); +int vb2_queue_init(struct vb2_queue *q); +void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); +#endif + +#define SMEM_IMAGE_VERSION_TABLE 469 +/* Mock all the missing parts for successful compilation ends */ + +#ifndef VIDC_DBG_LABEL +#define VIDC_DBG_LABEL "msm_vidc" +#endif + +/* + * This enforces a rate limit: not more than 6 messages + * in every 1s. + */ + +#define VIDC_DBG_SESSION_RATELIMIT_INTERVAL (1 * HZ) +#define VIDC_DBG_SESSION_RATELIMIT_BURST 6 + +#define VIDC_DBG_TAG VIDC_DBG_LABEL ": %6s: %08x: %5s: " +#define FW_DBG_TAG VIDC_DBG_LABEL ": %6s: " +#define DEFAULT_SID ((u32)-1) + +/* To enable messages OR these values and + * echo the result to debugfs file. 
+ * + * To enable all messages set debug_level = 0x101F + */ + +enum vidc_msg_prio { + VIDC_ERR = 0x00000001, + VIDC_HIGH = 0x00000002, + VIDC_LOW = 0x00000004, + VIDC_PERF = 0x00000008, + VIDC_PKT = 0x00000010, + VIDC_BUS = 0x00000020, + VIDC_ENCODER = 0x00000100, + VIDC_DECODER = 0x00000200, + VIDC_PRINTK = 0x00001000, + VIDC_FTRACE = 0x00002000, + FW_LOW = 0x00010000, + FW_MEDIUM = 0x00020000, + FW_HIGH = 0x00040000, + FW_ERROR = 0x00080000, + FW_FATAL = 0x00100000, + FW_PERF = 0x00200000, + FW_PRINTK = 0x10000000, + FW_FTRACE = 0x20000000, +}; +#define FW_LOGSHIFT 16 +#define FW_LOGMASK 0x0FFF0000 + +enum msm_vidc_debugfs_event { + MSM_VIDC_DEBUGFS_EVENT_ETB, + MSM_VIDC_DEBUGFS_EVENT_EBD, + MSM_VIDC_DEBUGFS_EVENT_FTB, + MSM_VIDC_DEBUGFS_EVENT_FBD, +}; + +enum vidc_err_recovery_disable { + VIDC_DISABLE_NOC_ERR_RECOV = 0x0001, + VIDC_DISABLE_NON_NOC_ERR_RECOV = 0x0002 +}; + +extern int msm_vidc_debug; +extern int msm_vidc_fw_debug_mode; +extern bool msm_vidc_fw_coverage; +extern bool msm_vidc_thermal_mitigation_disabled; +extern int msm_vidc_clock_voting; +extern bool msm_vidc_syscache_disable; +extern bool msm_vidc_lossless_encode; +extern bool msm_vidc_cvp_usage; +extern int msm_vidc_err_recovery_disable; +extern int msm_vidc_vpp_delay; + +#define dprintk(__level, sid, __fmt, ...) \ + do { \ + if (is_print_allowed(sid, __level)) { \ + if (msm_vidc_debug & VIDC_FTRACE) { \ + char trace_logbuf[MAX_TRACER_LOG_LENGTH]; \ + int log_length = snprintf(trace_logbuf, \ + MAX_TRACER_LOG_LENGTH, \ + VIDC_DBG_TAG __fmt, \ + get_debug_level_str(__level), \ + sid, \ + get_codec_name(sid), \ + ##__VA_ARGS__); \ + trace_msm_vidc_printf(trace_logbuf, \ + log_length); \ + } \ + if (msm_vidc_debug & VIDC_PRINTK) { \ + pr_info(VIDC_DBG_TAG __fmt, \ + get_debug_level_str(__level), \ + sid, \ + get_codec_name(sid), \ + ##__VA_ARGS__); \ + } \ + } \ + } while (0) + +#define s_vpr_e(sid, __fmt, ...) dprintk(VIDC_ERR, sid, __fmt, ##__VA_ARGS__) +#define s_vpr_h(sid, __fmt, ...) 
dprintk(VIDC_HIGH, sid, __fmt, ##__VA_ARGS__) +#define s_vpr_l(sid, __fmt, ...) dprintk(VIDC_LOW, sid, __fmt, ##__VA_ARGS__) +#define s_vpr_p(sid, __fmt, ...) dprintk(VIDC_PERF, sid, __fmt, ##__VA_ARGS__) +#define s_vpr_t(sid, __fmt, ...) dprintk(VIDC_PKT, sid, __fmt, ##__VA_ARGS__) +#define s_vpr_b(sid, __fmt, ...) dprintk(VIDC_BUS, sid, __fmt, ##__VA_ARGS__) +#define s_vpr_hp(sid, __fmt, ...) \ + dprintk(VIDC_HIGH|VIDC_PERF, sid, __fmt, ##__VA_ARGS__) + +#define d_vpr_e(__fmt, ...) \ + dprintk(VIDC_ERR, DEFAULT_SID, __fmt, ##__VA_ARGS__) +#define d_vpr_h(__fmt, ...) \ + dprintk(VIDC_HIGH, DEFAULT_SID, __fmt, ##__VA_ARGS__) +#define d_vpr_l(__fmt, ...) \ + dprintk(VIDC_LOW, DEFAULT_SID, __fmt, ##__VA_ARGS__) +#define d_vpr_p(__fmt, ...) \ + dprintk(VIDC_PERF, DEFAULT_SID, __fmt, ##__VA_ARGS__) +#define d_vpr_t(__fmt, ...) \ + dprintk(VIDC_PKT, DEFAULT_SID, __fmt, ##__VA_ARGS__) +#define d_vpr_b(__fmt, ...) \ + dprintk(VIDC_BUS, DEFAULT_SID, __fmt, ##__VA_ARGS__) + +#define dprintk_firmware(__level, __fmt, ...) \ + do { \ + if (__level & FW_FTRACE) { \ + char trace_logbuf[MAX_TRACER_LOG_LENGTH]; \ + int log_length = snprintf(trace_logbuf, \ + MAX_TRACER_LOG_LENGTH, \ + FW_DBG_TAG __fmt, \ + "fw", \ + ##__VA_ARGS__); \ + trace_msm_vidc_printf(trace_logbuf, \ + log_length); \ + } \ + if (__level & FW_PRINTK) { \ + pr_info(FW_DBG_TAG __fmt, \ + "fw", \ + ##__VA_ARGS__); \ + } \ + } while (0) + +#define dprintk_ratelimit(__level, __fmt, arg...) 
\ + do { \ + if (msm_vidc_check_ratelimit()) { \ + dprintk(__level, DEFAULT_SID, __fmt, arg); \ + } \ + } while (0) + +#define MSM_VIDC_ERROR(value) \ + do { if (value) \ + d_vpr_e("BugOn"); \ + BUG_ON(value); \ + } while (0) + +struct dentry *msm_vidc_debugfs_init_drv(void); +struct dentry *msm_vidc_debugfs_init_core(struct msm_vidc_core *core, + struct dentry *parent); +struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst, + struct dentry *parent); +void msm_vidc_debugfs_deinit_inst(struct msm_vidc_inst *inst); +void msm_vidc_debugfs_update(struct msm_vidc_inst *inst, + enum msm_vidc_debugfs_event e); +int msm_vidc_check_ratelimit(void); +int get_sid(u32 *sid, u32 session_type); +void update_log_ctxt(u32 sid, u32 session_type, u32 fourcc); + +static inline char *get_debug_level_str(int level) +{ + switch (level) { + case VIDC_ERR: + return "err "; + case VIDC_HIGH|VIDC_PERF: + case VIDC_HIGH: + return "high"; + case VIDC_LOW: + return "low "; + case VIDC_PERF: + return "perf"; + case VIDC_PKT: + return "pkt "; + case VIDC_BUS: + return "bus "; + default: + return "????"; + } +} + +/** + * 0xx -> allow prints for all sessions + * 1xx -> allow only encoder prints + * 2xx -> allow only decoder prints + * 4xx -> allow only cvp prints + */ +static inline bool is_print_allowed(u32 sid, u32 level) +{ + if (!(msm_vidc_debug & level)) + return false; + + if (!((msm_vidc_debug >> 8) & 0xF)) + return true; + + if (!sid || sid > vidc_driver->num_ctxt) + return true; + + if (vidc_driver->ctxt[sid-1].session_type & msm_vidc_debug) + return true; + + return false; +} + +static inline char *get_codec_name(u32 sid) +{ + if (!sid || sid > vidc_driver->num_ctxt) + return "....."; + + return vidc_driver->ctxt[sid-1].name; +} + +static inline void put_sid(u32 sid) +{ + if (!sid || sid > vidc_driver->num_ctxt) { + d_vpr_e("%s: invalid sid %#x\n", + __func__, sid); + return; + } + if (vidc_driver->ctxt[sid-1].used) + vidc_driver->ctxt[sid-1].used = 0; +} + +static 
inline void tic(struct msm_vidc_inst *i, enum profiling_points p, + char *b) +{ + struct timeval __ddl_tv = { 0 }; + + if (!i->debug.pdata[p].name[0]) + memcpy(i->debug.pdata[p].name, b, 64); + if ((msm_vidc_debug & VIDC_PERF) && + i->debug.pdata[p].sampling) { + do_gettimeofday(&__ddl_tv); + i->debug.pdata[p].start = + (__ddl_tv.tv_sec * 1000) + (__ddl_tv.tv_usec / 1000); + i->debug.pdata[p].sampling = false; + } +} + +static inline void toc(struct msm_vidc_inst *i, enum profiling_points p) +{ + struct timeval __ddl_tv = { 0 }; + + if ((msm_vidc_debug & VIDC_PERF) && + !i->debug.pdata[p].sampling) { + do_gettimeofday(&__ddl_tv); + i->debug.pdata[p].stop = (__ddl_tv.tv_sec * 1000) + + (__ddl_tv.tv_usec / 1000); + i->debug.pdata[p].cumulative += i->debug.pdata[p].stop - + i->debug.pdata[p].start; + i->debug.pdata[p].sampling = true; + } +} + +static inline void show_stats(struct msm_vidc_inst *i) +{ + int x; + + for (x = 0; x < MAX_PROFILING_POINTS; x++) { + if (i->debug.pdata[x].name[0] && + (msm_vidc_debug & VIDC_PERF)) { + if (i->debug.samples) { + s_vpr_p(i->sid, "%s averaged %d ms/sample\n", + i->debug.pdata[x].name, + i->debug.pdata[x].cumulative / + i->debug.samples); + } + + s_vpr_p(i->sid, "%s Samples: %d\n", + i->debug.pdata[x].name, i->debug.samples); + } + } +} + +static inline void msm_vidc_res_handle_fatal_hw_error( + struct msm_vidc_platform_resources *resources, + bool enable_fatal) +{ + enable_fatal &= resources->debug_timeout; + MSM_VIDC_ERROR(enable_fatal); +} + +static inline void msm_vidc_handle_hw_error(struct msm_vidc_core *core) +{ + bool enable_fatal = true; + + /* + * In current implementation user-initiated SSR triggers + * a fatal error from hardware. However, there is no way + * to know if fatal error is due to SSR or not. Handle + * user SSR as non-fatal. + */ + if (core->trigger_ssr) { + core->trigger_ssr = false; + enable_fatal = false; + } + + /* Video driver can decide FATAL handling of HW errors + * based on multiple factors. 
This condition check will + * be enhanced later. + */ + msm_vidc_res_handle_fatal_hw_error(&core->resources, enable_fatal); +} + +#endif diff --git a/techpack/video/msm/vidc/msm_vidc_events.h b/techpack/video/msm/vidc/msm_vidc_events.h new file mode 100644 index 000000000000..d3db860d093e --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_events.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM msm_vidc_events +#define TRACE_INCLUDE_FILE msm_vidc_events + +#if !defined(_TRACE_MSM_VIDC_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MSM_VIDC_H +#include +#include + +DECLARE_EVENT_CLASS(msm_v4l2_vidc, + + TP_PROTO(char *dummy), + + TP_ARGS(dummy), + + TP_STRUCT__entry( + __field(char *, dummy) + ), + + TP_fast_assign( + __entry->dummy = dummy; + ), + + TP_printk("%s", __entry->dummy) +); + +DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_open_start, + + TP_PROTO(char *dummy), + + TP_ARGS(dummy) +); + +DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_open_end, + + TP_PROTO(char *dummy), + + TP_ARGS(dummy) +); + +DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_close_start, + + TP_PROTO(char *dummy), + + TP_ARGS(dummy) +); + +DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_close_end, + + TP_PROTO(char *dummy), + + TP_ARGS(dummy) +); + +DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_fw_load_start, + + TP_PROTO(char *dummy), + + TP_ARGS(dummy) +); + +DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_fw_load_end, + + TP_PROTO(char *dummy), + + TP_ARGS(dummy) +); + +DECLARE_EVENT_CLASS(msm_vidc_common, + + TP_PROTO(void *instp, int old_state, int new_state), + + TP_ARGS(instp, old_state, new_state), + + TP_STRUCT__entry( + __field(void *, instp) + __field(int, old_state) + __field(int, new_state) + ), + + TP_fast_assign( + __entry->instp = instp; + __entry->old_state = old_state; + __entry->new_state = new_state; + ), + + TP_printk("Moved inst: %p from 0x%x to 
0x%x", + __entry->instp, + __entry->old_state, + __entry->new_state) +); + +DEFINE_EVENT(msm_vidc_common, msm_vidc_common_state_change, + + TP_PROTO(void *instp, int old_state, int new_state), + + TP_ARGS(instp, old_state, new_state) +); + +DECLARE_EVENT_CLASS(venus_hfi_var, + + TP_PROTO(u32 cp_start, u32 cp_size, + u32 cp_nonpixel_start, u32 cp_nonpixel_size), + + TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size), + + TP_STRUCT__entry( + __field(u32, cp_start) + __field(u32, cp_size) + __field(u32, cp_nonpixel_start) + __field(u32, cp_nonpixel_size) + ), + + TP_fast_assign( + __entry->cp_start = cp_start; + __entry->cp_size = cp_size; + __entry->cp_nonpixel_start = cp_nonpixel_start; + __entry->cp_nonpixel_size = cp_nonpixel_size; + ), + + TP_printk( + "TZBSP_MEM_PROTECT_VIDEO_VAR done, cp_start : 0x%x, cp_size : 0x%x, cp_nonpixel_start : 0x%x, cp_nonpixel_size : 0x%x", + __entry->cp_start, + __entry->cp_size, + __entry->cp_nonpixel_start, + __entry->cp_nonpixel_size) +); + +DEFINE_EVENT(venus_hfi_var, venus_hfi_var_done, + + TP_PROTO(u32 cp_start, u32 cp_size, + u32 cp_nonpixel_start, u32 cp_nonpixel_size), + + TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size) +); + +DECLARE_EVENT_CLASS(msm_v4l2_vidc_count_events, + + TP_PROTO(char *event_type, + u32 etb, u32 ebd, u32 ftb, u32 fbd), + + TP_ARGS(event_type, etb, ebd, ftb, fbd), + + TP_STRUCT__entry( + __field(char *, event_type) + __field(u32, etb) + __field(u32, ebd) + __field(u32, ftb) + __field(u32, fbd) + ), + + TP_fast_assign( + __entry->event_type = event_type; + __entry->etb = etb; + __entry->ebd = ebd; + __entry->ftb = ftb; + __entry->fbd = fbd; + ), + + TP_printk( + "%s, ETB %u EBD %u FTB %u FBD %u", + __entry->event_type, + __entry->etb, + __entry->ebd, + __entry->ftb, + __entry->fbd) +); + +DEFINE_EVENT(msm_v4l2_vidc_count_events, msm_v4l2_vidc_buffer_counter, + + TP_PROTO(char *event_type, + u32 etb, u32 ebd, u32 ftb, u32 fbd), + + TP_ARGS(event_type, + etb, ebd, ftb, fbd) 
+); + +DECLARE_EVENT_CLASS(msm_v4l2_vidc_buffer_events, + + TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp, + u32 alloc_len, u32 filled_len, u32 offset), + + TP_ARGS(event_type, device_addr, timestamp, alloc_len, + filled_len, offset), + + TP_STRUCT__entry( + __field(char *, event_type) + __field(u32, device_addr) + __field(int64_t, timestamp) + __field(u32, alloc_len) + __field(u32, filled_len) + __field(u32, offset) + ), + + TP_fast_assign( + __entry->event_type = event_type; + __entry->device_addr = device_addr; + __entry->timestamp = timestamp; + __entry->alloc_len = alloc_len; + __entry->filled_len = filled_len; + __entry->offset = offset; + ), + + TP_printk( + "%s, device_addr : 0x%x, timestamp : %lld, alloc_len : 0x%x, filled_len : 0x%x, offset : 0x%x", + __entry->event_type, + __entry->device_addr, + __entry->timestamp, + __entry->alloc_len, + __entry->filled_len, + __entry->offset) +); + +DEFINE_EVENT(msm_v4l2_vidc_buffer_events, msm_v4l2_vidc_buffer_event_start, + + TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp, + u32 alloc_len, u32 filled_len, u32 offset), + + TP_ARGS(event_type, device_addr, timestamp, alloc_len, + filled_len, offset) +); + +DEFINE_EVENT(msm_v4l2_vidc_buffer_events, msm_v4l2_vidc_buffer_event_end, + + TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp, + u32 alloc_len, u32 filled_len, u32 offset), + + TP_ARGS(event_type, device_addr, timestamp, alloc_len, + filled_len, offset) +); + +DECLARE_EVENT_CLASS(msm_smem_buffer_dma_ops, + + TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask, + size_t size, u32 align, u32 flags, int map_kernel), + + TP_ARGS(buffer_op, buffer_type, heap_mask, size, align, + flags, map_kernel), + + TP_STRUCT__entry( + __field(char *, buffer_op) + __field(u32, buffer_type) + __field(u32, heap_mask) + __field(u32, size) + __field(u32, align) + __field(u32, flags) + __field(int, map_kernel) + ), + + TP_fast_assign( + __entry->buffer_op = buffer_op; + __entry->buffer_type 
= buffer_type; + __entry->heap_mask = heap_mask; + __entry->size = size; + __entry->align = align; + __entry->flags = flags; + __entry->map_kernel = map_kernel; + ), + + TP_printk( + "%s, buffer_type : 0x%x, heap_mask : 0x%x, size : 0x%x, align : 0x%x, flags : 0x%x, map_kernel : %d", + __entry->buffer_op, + __entry->buffer_type, + __entry->heap_mask, + __entry->size, + __entry->align, + __entry->flags, + __entry->map_kernel) +); + +DEFINE_EVENT(msm_smem_buffer_dma_ops, msm_smem_buffer_dma_op_start, + + TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask, + size_t size, u32 align, u32 flags, int map_kernel), + + TP_ARGS(buffer_op, buffer_type, heap_mask, size, align, + flags, map_kernel) +); + +DEFINE_EVENT(msm_smem_buffer_dma_ops, msm_smem_buffer_dma_op_end, + + TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask, + size_t size, u32 align, u32 flags, int map_kernel), + + TP_ARGS(buffer_op, buffer_type, heap_mask, size, align, + flags, map_kernel) +); + +DECLARE_EVENT_CLASS(msm_smem_buffer_iommu_ops, + + TP_PROTO(char *buffer_op, int domain_num, int partition_num, + unsigned long align, unsigned long iova, + unsigned long buffer_size), + + TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size), + + TP_STRUCT__entry( + __field(char *, buffer_op) + __field(int, domain_num) + __field(int, partition_num) + __field(unsigned long, align) + __field(unsigned long, iova) + __field(unsigned long, buffer_size) + ), + + TP_fast_assign( + __entry->buffer_op = buffer_op; + __entry->domain_num = domain_num; + __entry->partition_num = partition_num; + __entry->align = align; + __entry->iova = iova; + __entry->buffer_size = buffer_size; + ), + + TP_printk( + "%s, domain : %d, partition : %d, align : %lx, iova : 0x%lx, buffer_size=%lx", + __entry->buffer_op, + __entry->domain_num, + __entry->partition_num, + __entry->align, + __entry->iova, + __entry->buffer_size) +); + +DEFINE_EVENT(msm_smem_buffer_iommu_ops, msm_smem_buffer_iommu_op_start, + + 
TP_PROTO(char *buffer_op, int domain_num, int partition_num, + unsigned long align, unsigned long iova, + unsigned long buffer_size), + + TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size) +); + +DEFINE_EVENT(msm_smem_buffer_iommu_ops, msm_smem_buffer_iommu_op_end, + + TP_PROTO(char *buffer_op, int domain_num, int partition_num, + unsigned long align, unsigned long iova, + unsigned long buffer_size), + + TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size) +); + +DECLARE_EVENT_CLASS(msm_vidc_perf, + + TP_PROTO(const char *name, unsigned long value), + + TP_ARGS(name, value), + + TP_STRUCT__entry( + __field(const char *, name) + __field(unsigned long, value) + ), + + TP_fast_assign( + __entry->name = name; + __entry->value = value; + ), + + TP_printk("%s %lu", __entry->name, __entry->value) +); + +DEFINE_EVENT(msm_vidc_perf, msm_vidc_perf_clock_scale, + + TP_PROTO(const char *clock_name, unsigned long frequency), + + TP_ARGS(clock_name, frequency) +); + +DEFINE_EVENT(msm_vidc_perf, msm_vidc_perf_bus_vote, + + TP_PROTO(const char *governor_mode, unsigned long ab), + + TP_ARGS(governor_mode, ab) +); + +#define MAX_TRACER_LOG_LENGTH 128 + +DECLARE_EVENT_CLASS(msm_v4l2_vidc_log, + + TP_PROTO(char *dummy, int length), + + TP_ARGS(dummy, length), + + TP_STRUCT__entry( + __array(char, dummy, MAX_TRACER_LOG_LENGTH) + __field(int, length) + ), + + TP_fast_assign( + __entry->length = length < MAX_TRACER_LOG_LENGTH ? 
+ length : MAX_TRACER_LOG_LENGTH; + __entry->dummy[0] = '\0'; + if (__entry->length > 0) { + memcpy(__entry->dummy, dummy, __entry->length); + if (__entry->dummy[__entry->length - 1] == '\n') + __entry->dummy[__entry->length - 1] = '\0'; + } + ), + + TP_printk("%s", __entry->dummy) +); + +DEFINE_EVENT(msm_v4l2_vidc_log, msm_vidc_printf, + + TP_PROTO(char *dummy, int length), + + TP_ARGS(dummy, length) +); +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../techpack/video/msm/vidc + +#include diff --git a/techpack/video/msm/vidc/msm_vidc_internal.h b/techpack/video/msm/vidc/msm_vidc_internal.h new file mode 100644 index 000000000000..62e8c0b6a110 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_internal.h @@ -0,0 +1,673 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _MSM_VIDC_INTERNAL_H_ +#define _MSM_VIDC_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "msm_vidc.h" +#include "vidc/media/msm_media_info.h" +#include "vidc_hfi_api.h" +#include "vidc_hfi_helper.h" + +#define MSM_VIDC_DRV_NAME "msm_vidc_driver" + +/* kernel/msm-4.19 */ +#define MSM_VIDC_VERSION ((0 << 16) + (4 << 8) + 19) + +#define MAX_DEBUGFS_NAME 50 +#define DEFAULT_TIMEOUT 3 +#define DEFAULT_HEIGHT 240 +#define DEFAULT_WIDTH 320 +#define MIN_SUPPORTED_WIDTH 32 +#define MIN_SUPPORTED_HEIGHT 32 +#define DEFAULT_FPS 30 +#define MINIMUM_FPS 1 +#define MAXIMUM_FPS 960 +#define SINGLE_INPUT_BUFFER 1 +#define SINGLE_OUTPUT_BUFFER 1 +#define MAX_NUM_INPUT_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME +#define MAX_NUM_OUTPUT_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME + +#define MAX_SUPPORTED_INSTANCES 16 +#define MAX_SUPPORTED_INSTANCES_24 24 +#define MAX_BSE_VPP_DELAY 6 +#define DEFAULT_BSE_VPP_DELAY 2 + +/* Maintains the number of FTB's between each FBD over a window */ +#define 
DCVS_FTB_WINDOW 16 +/* Superframe can have maximum of 32 frames */ +#define VIDC_SUPERFRAME_MAX 32 +#define COLOR_RANGE_UNSPECIFIED (-1) + +#define V4L2_EVENT_VIDC_BASE 10 +#define INPUT_MPLANE V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE +#define OUTPUT_MPLANE V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE + +#define RATE_CONTROL_OFF (V4L2_MPEG_VIDEO_BITRATE_MODE_CQ + 1) +#define RATE_CONTROL_LOSSLESS (V4L2_MPEG_VIDEO_BITRATE_MODE_CQ + 2) +#define SYS_MSG_START HAL_SYS_INIT_DONE +#define SYS_MSG_END HAL_SYS_ERROR +#define SESSION_MSG_START HAL_SESSION_EVENT_CHANGE +#define SESSION_MSG_END HAL_SESSION_ERROR +#define SYS_MSG_INDEX(__msg) (__msg - SYS_MSG_START) +#define SESSION_MSG_INDEX(__msg) (__msg - SESSION_MSG_START) + +#define MAX_NAME_LENGTH 64 + +#define DB_DISABLE_SLICE_BOUNDARY \ + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY + +#define NUM_MBS_PER_SEC(__height, __width, __fps) \ + (NUM_MBS_PER_FRAME(__height, __width) * __fps) + +#define NUM_MBS_PER_FRAME(__height, __width) \ + ((ALIGN(__height, 16) / 16) * (ALIGN(__width, 16) / 16)) + +#define call_core_op(c, op, ...) \ + (((c) && (c)->core_ops && (c)->core_ops->op) ? \ + ((c)->core_ops->op(__VA_ARGS__)) : 0) + +/* + * Convert Q16 number into Integer and Fractional part upto 2 places. + * Ex : 105752 / 65536 = 1.61; 1.61 in Q16 = 105752; + * Integer part = 105752 / 65536 = 1; + * Reminder = 105752 * 0xFFFF = 40216; Last 16 bits. + * Fractional part = 40216 * 100 / 65536 = 61; + * Now convert to FP(1, 61, 100). 
+ */ +#define Q16_INT(q) ((q) >> 16) +#define Q16_FRAC(q) ((((q) & 0xFFFF) * 100) >> 16) + +struct msm_vidc_inst; + +enum vidc_ports { + INPUT_PORT, + OUTPUT_PORT, + MAX_PORT_NUM +}; + +enum vidc_core_state { + VIDC_CORE_UNINIT = 0, + VIDC_CORE_INIT, + VIDC_CORE_INIT_DONE, +}; + +/* + * Do not change the enum values unless + * you know what you are doing + */ +enum instance_state { + MSM_VIDC_CORE_UNINIT_DONE = 0x0001, + MSM_VIDC_CORE_INIT, + MSM_VIDC_CORE_INIT_DONE, + MSM_VIDC_OPEN, + MSM_VIDC_OPEN_DONE, + MSM_VIDC_LOAD_RESOURCES, + MSM_VIDC_LOAD_RESOURCES_DONE, + MSM_VIDC_START, + MSM_VIDC_START_DONE, + MSM_VIDC_STOP, + MSM_VIDC_STOP_DONE, + MSM_VIDC_RELEASE_RESOURCES, + MSM_VIDC_RELEASE_RESOURCES_DONE, + MSM_VIDC_CLOSE, + MSM_VIDC_CLOSE_DONE, + MSM_VIDC_CORE_UNINIT, + MSM_VIDC_CORE_INVALID +}; + +struct buf_info { + struct list_head list; + struct vb2_buffer *buf; +}; + +struct msm_vidc_list { + struct list_head list; + struct mutex lock; +}; + +static inline void INIT_MSM_VIDC_LIST(struct msm_vidc_list *mlist) +{ + mutex_init(&mlist->lock); + INIT_LIST_HEAD(&mlist->list); +} + +static inline void DEINIT_MSM_VIDC_LIST(struct msm_vidc_list *mlist) +{ + mutex_destroy(&mlist->lock); +} + +enum buffer_owner { + DRIVER, + FIRMWARE, + CLIENT, + MAX_OWNER +}; + +struct vidc_freq_data { + struct list_head list; + u32 device_addr; + unsigned long freq; + bool turbo; +}; + +struct vidc_input_cr_data { + struct list_head list; + u32 index; + u32 input_cr; +}; + +struct recon_buf { + struct list_head list; + u32 buffer_index; + u32 CR; + u32 CF; +}; + +struct eos_buf { + struct list_head list; + struct msm_smem smem; + u32 is_queued; +}; + +struct internal_buf { + struct list_head list; + enum hal_buffer buffer_type; + struct msm_smem smem; + enum buffer_owner buffer_ownership; + bool mark_remove; +}; + +struct msm_vidc_csc_coeff { + u32 *vpe_csc_custom_matrix_coeff; + u32 *vpe_csc_custom_bias_coeff; + u32 *vpe_csc_custom_limit_coeff; +}; + +struct msm_vidc_buf_data { + 
struct list_head list; + u32 index; + u32 input_tag; + u32 input_tag2; +}; + +struct msm_vidc_window_data { + struct list_head list; + u32 frame_size; + u32 etb_count; +}; + +struct msm_vidc_common_data { + char key[128]; + int value; +}; + +struct msm_vidc_codec_data { + u32 fourcc; + enum session_type session_type; + int vpp_cycles; + int vsp_cycles; + int low_power_cycles; +}; + +struct msm_vidc_codec_capability { + enum hal_capability capability_type; + enum hal_domain domains; + enum hal_video_codec codecs; + u32 min; + u32 max; + u32 step_size; + u32 default_value; +}; + +struct msm_vidc_vpss_capability { + u32 width; + u32 height; +}; + +struct msm_vidc_codec { + enum hal_domain domain; + enum hal_video_codec codec; +}; + +struct msm_vidc_timestamps { + struct list_head list; + s64 timestamp_us; + u32 framerate; + bool is_valid; + bool is_eos; +}; + +enum efuse_purpose { + SKU_VERSION = 0, +}; + +enum sku_version { + SKU_VERSION_0 = 0, + SKU_VERSION_1, + SKU_VERSION_2, +}; + +struct msm_vidc_efuse_data { + u32 start_address; + u32 size; + u32 mask; + u32 shift; + enum efuse_purpose purpose; +}; + +enum vpu_version { + VPU_VERSION_AR50 = 1, + VPU_VERSION_IRIS1, + VPU_VERSION_IRIS2, + VPU_VERSION_IRIS2_1, + VPU_VERSION_AR50_LITE, +}; + +struct msm_vidc_ubwc_config_data { + struct { + u32 max_channel_override : 1; + u32 mal_length_override : 1; + u32 hb_override : 1; + u32 bank_swzl_level_override : 1; + u32 bank_spreading_override : 1; + u32 reserved : 27; + } override_bit_info; + + u32 max_channels; + u32 mal_length; + u32 highest_bank_bit; + u32 bank_swzl_level; + u32 bank_spreading; +}; + +struct msm_vidc_platform_data { + struct msm_vidc_common_data *common_data; + unsigned int common_data_length; + struct msm_vidc_codec_data *codec_data; + unsigned int codec_data_length; + struct allowed_clock_rates_table *clock_data; + unsigned int clock_data_length; + struct msm_vidc_codec *codecs; + uint32_t codecs_count; + struct msm_vidc_codec_capability *codec_caps; 
+ uint32_t codec_caps_count; + struct msm_vidc_vpss_capability *vpss_caps; + uint32_t vpss_caps_count; + struct msm_vidc_csc_coeff csc_data; + struct msm_vidc_efuse_data *efuse_data; + unsigned int efuse_data_length; + unsigned int sku_version; + uint32_t vpu_ver; + uint32_t num_vpp_pipes; + struct msm_vidc_ubwc_config_data *ubwc_config; + uint32_t max_inst_count; +}; + +struct msm_vidc_format_desc { + char name[MAX_NAME_LENGTH]; + u8 description[32]; + u32 fourcc; +}; + +struct msm_vidc_format { + char name[MAX_NAME_LENGTH]; + u8 description[32]; + u32 count_min; + u32 count_min_host; + u32 count_actual; + struct v4l2_format v4l2_fmt; +}; + +struct msm_vidc_format_constraint { + u32 fourcc; + u32 num_planes; + u32 y_max_stride; + u32 y_buffer_alignment; + u32 uv_max_stride; + u32 uv_buffer_alignment; +}; + +struct log_cookie { + u32 used; + u32 session_type; + u32 codec_type; + char name[20]; +}; + +struct msm_vidc_drv { + struct mutex lock; + struct list_head cores; + int num_cores; + struct dentry *debugfs_root; + int thermal_level; + u32 sku_version; + struct log_cookie *ctxt; + u32 num_ctxt; +}; + +struct msm_video_device { + int type; + struct video_device vdev; +}; + +struct session_prop { + u32 fps; + u32 bitrate; + u32 extradata_ctrls; +}; + +struct buf_queue { + struct vb2_queue vb2_bufq; + struct mutex lock; +}; + +enum profiling_points { + SYS_INIT = 0, + SESSION_INIT, + LOAD_RESOURCES, + FRAME_PROCESSING, + FW_IDLE, + MAX_PROFILING_POINTS, +}; + +struct buf_count { + int etb; + int ftb; + int fbd; + int ebd; +}; + +struct batch_mode { + bool enable; + u32 size; +}; + +enum dcvs_flags { + MSM_VIDC_DCVS_INCR = BIT(0), + MSM_VIDC_DCVS_DECR = BIT(1), +}; + +struct clock_data { + int buffer_counter; + int min_threshold; + int nom_threshold; + int max_threshold; + bool dcvs_mode; + u32 dcvs_window; + unsigned long bitrate; + unsigned long min_freq; + unsigned long curr_freq; + u32 vpss_cycles; + u32 ise_cycles; + u32 ddr_bw; + u32 sys_cache_bw; + u32 
operating_rate; + struct msm_vidc_codec_data *entry; + u32 core_id; + u32 dpb_fourcc; + u32 opb_fourcc; + u32 work_mode; + bool low_latency_mode; + bool is_legacy_cbr; + u32 work_route; + u32 dcvs_flags; + u32 frame_rate; +}; + +struct vidc_bus_vote_data { + u32 sid; + enum hal_domain domain; + enum hal_video_codec codec; + u32 color_formats[2]; + int num_formats; /* 1 = DPB-OPB unified; 2 = split */ + int input_height, input_width, bitrate; + int output_height, output_width; + int rotation; + int compression_ratio; + int complexity_factor; + int input_cr; + unsigned int lcu_size; + unsigned int fps; + enum msm_vidc_power_mode power_mode; + u32 work_mode; + bool use_sys_cache; + bool b_frames_enabled; + unsigned long calc_bw_ddr; + unsigned long calc_bw_llcc; + u32 num_vpp_pipes; +}; + +struct profile_data { + int start; + int stop; + int cumulative; + char name[64]; + int sampling; + int average; +}; + +struct msm_vidc_debug { + struct profile_data pdata[MAX_PROFILING_POINTS]; + int profile; + int samples; +}; + +enum msm_vidc_modes { + VIDC_SECURE = BIT(0), + VIDC_TURBO = BIT(1), + VIDC_THUMBNAIL = BIT(2), + VIDC_LOW_POWER = BIT(3), +}; + +struct msm_vidc_core_ops { + unsigned long (*calc_freq)(struct msm_vidc_inst *inst, u32 filled_len); + int (*decide_work_route)(struct msm_vidc_inst *inst); + int (*decide_work_mode)(struct msm_vidc_inst *inst); + int (*decide_core_and_power_mode)(struct msm_vidc_inst *inst); + int (*calc_bw)(struct vidc_bus_vote_data *vidc_data); +}; + +struct msm_vidc_ssr { + enum hal_ssr_trigger_type ssr_type; + u32 sub_client_id; + u32 test_addr; +}; + +struct msm_vidc_core { + struct list_head list; + struct mutex lock; + int id; + struct hfi_device *device; + struct msm_vidc_platform_data *platform_data; + struct msm_video_device vdev[MSM_VIDC_MAX_DEVICES]; + struct v4l2_device v4l2_dev; + struct list_head instances; + struct dentry *debugfs_root; + enum vidc_core_state state; + struct completion completions[SYS_MSG_END - SYS_MSG_START + 
1]; + enum msm_vidc_hfi_type hfi_type; + struct msm_vidc_platform_resources resources; + struct msm_vidc_capability *capabilities; + struct delayed_work fw_unload_work; + struct work_struct ssr_work; + struct workqueue_struct *vidc_core_workq; + struct msm_vidc_ssr ssr; + bool smmu_fault_handled; + bool trigger_ssr; + unsigned long min_freq; + unsigned long curr_freq; + struct msm_vidc_core_ops *core_ops; +}; + +struct msm_vidc_inst; +struct msm_vidc_inst_smem_ops { + int (*smem_map_dma_buf)(struct msm_vidc_inst *inst, + struct msm_smem *smem); + int (*smem_unmap_dma_buf)(struct msm_vidc_inst *inst, + struct msm_smem *smem); + int (*smem_prefetch)(struct msm_vidc_inst *inst); + int (*smem_drain)(struct msm_vidc_inst *inst); +}; + +struct msm_vidc_inst { + struct list_head list; + struct mutex sync_lock, lock; + struct msm_vidc_core *core; + enum session_type session_type; + void *session; + u32 sid; + struct session_prop prop; + enum instance_state state; + struct msm_vidc_format fmts[MAX_PORT_NUM]; + struct buf_queue bufq[MAX_PORT_NUM]; + struct msm_vidc_list input_crs; + struct msm_vidc_list scratchbufs; + struct msm_vidc_list persistbufs; + struct msm_vidc_list pending_getpropq; + struct msm_vidc_list outputbufs; + struct msm_vidc_list refbufs; + struct msm_vidc_list eosbufs; + struct msm_vidc_list registeredbufs; + struct msm_vidc_list etb_data; + struct msm_vidc_list fbd_data; + struct msm_vidc_list window_data; + struct msm_vidc_list client_data; + struct msm_vidc_list timestamps; + struct buffer_requirements buff_req; + struct vidc_frame_data superframe_data[VIDC_SUPERFRAME_MAX]; + struct v4l2_ctrl_handler ctrl_handler; + struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1]; + struct v4l2_fh event_handler; + struct msm_smem *extradata_handle; + bool in_reconfig; + struct dentry *debugfs_root; + void *priv; + struct msm_vidc_debug debug; + struct buf_count count; + struct clock_data clk_data; + struct vidc_bus_vote_data bus_data; + enum 
msm_vidc_modes flags; + struct msm_vidc_capability capability; + u32 buffer_size_limit; + enum buffer_mode_type buffer_mode_set[MAX_PORT_NUM]; + enum multi_stream stream_output_mode; + struct v4l2_ctrl **ctrls; + u32 num_ctrls; + int bit_depth; + struct kref kref; + bool in_flush; + bool out_flush; + bool flush_timestamps; + u32 pic_struct; + u32 colour_space; + u32 profile; + u32 level; + u32 entropy_mode; + u32 rc_type; + u32 hybrid_hp; + u32 layer_bitrate; + u32 client_set_ctrls; + bool static_rotation_flip_enabled; + bool external_blur; + struct internal_buf *dpb_extra_binfo; + struct msm_vidc_codec_data *codec_data; + bool hdr10_sei_enabled; + struct hal_hdr10_pq_sei hdr10_sei_params; + struct batch_mode batch; + struct delayed_work batch_work; + struct msm_vidc_inst_smem_ops *smem_ops; + enum memory_ops memory_ops; + struct memory_regions regions; + int (*buffer_size_calculators)(struct msm_vidc_inst *inst); + bool all_intra; + bool is_perf_eligible_session; + u32 max_filled_len; + int full_range; + struct mutex ubwc_stats_lock; + struct msm_vidc_ubwc_stats ubwc_stats; + u32 bse_vpp_delay; + u32 first_reconfig_done; + u64 last_qbuf_time_ns; + bool active; + bool has_bframe; + bool boost_enabled; + bool boost_qp_enabled; + u32 boost_min_qp; + u32 boost_max_qp; +}; + +extern struct msm_vidc_drv *vidc_driver; + +struct msm_vidc_ctrl { + u32 id; + char name[MAX_NAME_LENGTH]; + enum v4l2_ctrl_type type; + s64 minimum; + s64 maximum; + s64 default_value; + u32 step; + u32 menu_skip_mask; + u32 flags; + const char * const *qmenu; +}; + +void handle_cmd_response(enum hal_command_response cmd, void *data); +int msm_vidc_trigger_ssr(struct msm_vidc_core *core, + u64 trigger_ssr_val); +int msm_vidc_noc_error_info(struct msm_vidc_core *core); +int msm_vidc_check_session_supported(struct msm_vidc_inst *inst); +int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst); +void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type); + +enum 
msm_vidc_flags { + MSM_VIDC_FLAG_DEFERRED = BIT(0), + MSM_VIDC_FLAG_RBR_PENDING = BIT(1), + MSM_VIDC_FLAG_QUEUED = BIT(2), +}; + +struct msm_vidc_buffer { + struct list_head list; + struct kref kref; + struct msm_smem smem[VIDEO_MAX_PLANES]; + struct vb2_v4l2_buffer vvb; + enum msm_vidc_flags flags; +}; + +void msm_comm_handle_thermal_event(void); +int msm_smem_alloc(size_t size, u32 align, u32 flags, + enum hal_buffer buffer_type, int map_kernel, + void *res, u32 session_type, struct msm_smem *smem, u32 sid); +int msm_smem_free(struct msm_smem *smem, u32 sid); + +struct context_bank_info *msm_smem_get_context_bank(u32 session_type, + bool is_secure, struct msm_vidc_platform_resources *res, + enum hal_buffer buffer_type, u32 sid); +int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem); +int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem); +struct dma_buf *msm_smem_get_dma_buf(int fd, u32 sid); +void msm_smem_put_dma_buf(void *dma_buf, u32 sid); +int msm_smem_cache_operations(struct dma_buf *dbuf, + enum smem_cache_ops cache_op, unsigned long offset, + unsigned long size, u32 sid); +int msm_smem_memory_prefetch(struct msm_vidc_inst *inst); +int msm_smem_memory_drain(struct msm_vidc_inst *inst); +void msm_vidc_fw_unload_handler(struct work_struct *work); +void msm_vidc_ssr_handler(struct work_struct *work); +/* + * XXX: normally should be in msm_vidc.h, but that's meant for public APIs, + * whereas this is private + */ +int msm_vidc_destroy(struct msm_vidc_inst *inst); +void *vidc_get_drv_data(struct device *dev); +#endif diff --git a/techpack/video/msm/vidc/msm_vidc_platform.c b/techpack/video/msm/vidc/msm_vidc_platform.c new file mode 100644 index 000000000000..740c5d0587ff --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_platform.c @@ -0,0 +1,2512 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include "msm_vidc_internal.h" +#include "msm_vidc_debug.h" + + +#define DDR_TYPE_LPDDR4 0x6 +#define DDR_TYPE_LPDDR4X 0x7 +#define DDR_TYPE_LPDDR5 0x8 +#define DDR_TYPE_LPDDR5X 0x9 + +#define CODEC_ENTRY(n, p, vsp, vpp, lp) \ +{ \ + .fourcc = n, \ + .session_type = p, \ + .vsp_cycles = vsp, \ + .vpp_cycles = vpp, \ + .low_power_cycles = lp \ +} + +#define EFUSE_ENTRY(sa, s, m, sh, p) \ +{ \ + .start_address = sa, \ + .size = s, \ + .mask = m, \ + .shift = sh, \ + .purpose = p \ +} + +#define UBWC_CONFIG(mco, mlo, hbo, bslo, bso, rs, mc, ml, hbb, bsl, bsp) \ +{ \ + .override_bit_info.max_channel_override = mco, \ + .override_bit_info.mal_length_override = mlo, \ + .override_bit_info.hb_override = hbo, \ + .override_bit_info.bank_swzl_level_override = bslo, \ + .override_bit_info.bank_spreading_override = bso, \ + .override_bit_info.reserved = rs, \ + .max_channels = mc, \ + .mal_length = ml, \ + .highest_bank_bit = hbb, \ + .bank_swzl_level = bsl, \ + .bank_spreading = bsp, \ +} + +static struct msm_vidc_codec_data default_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 125, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 125, 675, 320), +}; + +/* Update with Lahaina data */ +static struct msm_vidc_codec_data lahaina_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 25, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 25, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 60, 200, 200), +}; + +static struct msm_vidc_codec_data bengal_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 0, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 0, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 0, 440, 440), + 
CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 0, 440, 440), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 0, 440, 440), +}; + +static struct msm_vidc_codec_data shima_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 25, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 25, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 60, 200, 200), +}; + +static struct msm_vidc_codec_data holi_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 0, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 0, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 0, 440, 440), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 0, 440, 440), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 0, 440, 440), +}; + +static struct msm_vidc_codec_data scuba_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 0, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 0, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 0, 440, 440), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 0, 440, 440), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 0, 440, 440), +}; + +static struct msm_vidc_codec_data yupik_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 25, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 25, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 25, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 60, 200, 200), +}; + +#define ENC HAL_VIDEO_DOMAIN_ENCODER +#define DEC HAL_VIDEO_DOMAIN_DECODER +#define H264 HAL_VIDEO_CODEC_H264 +#define HEVC HAL_VIDEO_CODEC_HEVC +#define VP9 
HAL_VIDEO_CODEC_VP9 +#define MPEG2 HAL_VIDEO_CODEC_MPEG2 +#define DOMAINS_ALL (HAL_VIDEO_DOMAIN_ENCODER | HAL_VIDEO_DOMAIN_DECODER) +#define CODECS_ALL (HAL_VIDEO_CODEC_H264 | HAL_VIDEO_CODEC_HEVC | \ + HAL_VIDEO_CODEC_VP9 | HAL_VIDEO_CODEC_MPEG2) + +static struct msm_vidc_codec bengal_codecs[] = { + /* {domain, codec} */ + {DEC, H264}, {DEC, HEVC}, {DEC, VP9}, + {ENC, H264}, {ENC, HEVC}, +}; + +static struct msm_vidc_codec shima_codecs[] = { + /* {domain, codec} */ + {DEC, H264}, {DEC, HEVC}, {DEC, VP9}, {DEC, MPEG2}, + {ENC, H264}, {ENC, HEVC}, +}; + +static struct msm_vidc_codec holi_codecs[] = { + /* {domain, codec} */ + {DEC, H264}, {DEC, HEVC}, {DEC, VP9}, + {ENC, H264}, {ENC, HEVC}, +}; + +static struct msm_vidc_codec scuba_codecs[] = { + /* {domain, codec} */ + {DEC, H264}, {DEC, HEVC}, {DEC, VP9}, + {ENC, H264}, {ENC, HEVC}, +}; + +static struct msm_vidc_codec default_codecs[] = { + /* {domain, codec} */ + {DEC, H264}, {DEC, HEVC}, {DEC, VP9}, {DEC, MPEG2}, + {ENC, H264}, {ENC, HEVC}, +}; + +static struct msm_vidc_codec_capability bengal_capabilities_v0[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 128, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 128, 1920, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 1920, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* ((1920 * 1088) / 256) */ + {CAP_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 64, 8160, 1, 8160}, + /* 1080@30 decode + 1080@30 encode */ + {CAP_MBS_PER_SECOND, DOMAINS_ALL, CODECS_ALL, 64, 489600, 1, 244800}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 120, 1, 30}, + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 60000000, 1, 20000000}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 4, 1, 0}, + /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 60000000, 1, 
20000000}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 128, 1920, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 128, 1920, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 1920, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 64, 8160, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 35000000, 1, 20000000}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 8192, 2, 8192}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 8192, 2, 8192}, + + /* Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. 
But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported + */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, +}; + +static struct msm_vidc_codec_capability bengal_capabilities_v1[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 128, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 128, 1920, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 1920, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* ((1920 * 1088) / 256) */ + {CAP_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 64, 8160, 1, 8160}, + /* 1920*1088 @30fps */ + {CAP_MBS_PER_SECOND, DOMAINS_ALL, CODECS_ALL, 64, 244800, 1, 244800}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 120, 1, 30}, + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 60000000, 1, 20000000}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 4, 1, 0}, + /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 60000000, 1, 20000000}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, 
H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 128, 1920, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 128, 1920, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 1920, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 64, 8160, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 35000000, 1, 20000000}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 8192, 2, 8192}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 8192, 2, 8192}, + + /* Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported + */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, +}; + +static struct msm_vidc_codec_capability holi_capabilities[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 1920, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 
1920, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* ((1920 * 1088) / 256) */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 8160, 1, 8160}, + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 8160, 1, 8160}, + /* 1080@30 decode + 1080@30 encode */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 489600, 1, 244800}, + {CAP_MBS_PER_SECOND, ENC, CODECS_ALL, 64, 489600, 1, 244800}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 120, 1, 30}, + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 60000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 60000000, 1, 20000000}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 4, 1, 0}, + /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 1920, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 1920, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 1920, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 64, 8160, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 35000000, 1, 20000000}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 60, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 8192, 2, 8192}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 8192, 2, 8192}, + + /* Level for AVC 
and HEVC encoder specific. + * Default for levels is UNKNOWN value. But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported + */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, +}; + +static struct msm_vidc_codec_capability scuba_capabilities[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 1920, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 1920, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* ((1920 * 1088) / 256) */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 8160, 1, 8160}, + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 8160, 1, 8160}, + /* 1080@30 decode + 1080@30 encode */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 489600, 1, 244800}, + {CAP_MBS_PER_SECOND, ENC, CODECS_ALL, 64, 489600, 1, 244800}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 120, 1, 30}, + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 60000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 60000000, 1, 20000000}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 4, 1, 0}, + /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + 
{CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 1920, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 1920, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 1920, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 1920, 2, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 64, 8160, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 35000000, 1, 20000000}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 60, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 8192, 2, 8192}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 8192, 2, 8192}, + + /* Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. 
But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported + */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1}, +}; + +static struct msm_vidc_codec_capability lahaina_capabilities[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value,} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 8192, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 8192, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 8192, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 8192, 2, 1080}, + /* (8192 * 4320) / 256 */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 138240, 1, 138240}, + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 138240, 1, 138240}, + /* ((1920 * 1088) / 256) * 960 fps */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 7833600, 1, 7833600}, + {CAP_MBS_PER_SECOND, ENC, CODECS_ALL, 64, 7833600, 1, 7833600}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 960, 1, 30}, + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 220000000, 1, 20000000}, + {CAP_BITRATE, ENC, HEVC, 1, 160000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 160000000, 1, 20000000}, + {CAP_SCALE_X, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_Y, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_X, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_SCALE_Y, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_BFRAME, ENC, H264|HEVC, 0, 1, 1, 0}, + 
{CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 2, 1, 0}, + /* ((4096 * 2304) / 256) * 60 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 2211840, 1, 2211840}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_I_FRAME_QP, ENC, VP9, 0, 127, 1, 20}, + {CAP_P_FRAME_QP, ENC, VP9, 0, 127, 1, 40}, + {CAP_B_FRAME_QP, ENC, VP9, 0, 127, 1, 40}, + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Mpeg2 decoder specific */ + {CAP_FRAME_WIDTH, DEC, MPEG2, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, MPEG2, 96, 1920, 1, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_MBS_PER_FRAME, DEC, MPEG2, 36, 8160, 1, 8160}, + /* ((1920 * 1088) / 256) * 30*/ + {CAP_MBS_PER_SECOND, DEC, MPEG2, 36, 244800, 1, 244800}, + {CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30}, + {CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000}, + + /* VP9 decoder-specific */ + {CAP_FRAME_WIDTH, DEC, VP9, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, VP9, 96, 4096, 1, 1080}, + /* (4096 * 2304) / 256 */ + {CAP_MBS_PER_FRAME, DEC, VP9, 36, 36864, 1, 36864}, + /* ((4096 * 2304) / 256) * 60*/ + {CAP_MBS_PER_SECOND, DEC, VP9, 36, 2211840, 1, 2211840}, + {CAP_FRAMERATE, DEC, VP9, 1, 60, 1, 60}, + {CAP_BITRATE, DEC, VP9, 1, 100000000, 1, 20000000}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (4096 * 2304) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 36864, 1, 36864}, + {CAP_SECURE_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 36864, 1, 36864}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, 
CODECS_ALL, 1, 40000000, 1, 20000000}, + + /* Batch Mode Decode */ + {CAP_BATCH_MAX_MB_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 34816}, + /* (4096 * 2176) / 256 */ + {CAP_BATCH_MAX_FPS, DEC, CODECS_ALL, 1, 120, 1, 120}, + + /* Lossless encoding usecase specific */ + {CAP_LOSSLESS_FRAME_WIDTH, ENC, H264|HEVC, 128, 4096, 2, 1920}, + {CAP_LOSSLESS_FRAME_HEIGHT, ENC, H264|HEVC, 128, 4096, 2, 1080}, + /* (4096 * 2304) / 256 */ + {CAP_LOSSLESS_MBS_PER_FRAME, ENC, H264|HEVC, 64, 36864, 1, 36864}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 240, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 16384, 2, 16384}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 16384, 2, 16384}, + + /* + * Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported. 
+ */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_6_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_6_0}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_6_1, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5}, +}; + +static struct msm_vidc_codec_capability yupik_capabilities_v0[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value,} */ + /* Decode spec */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 5760, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 5760, 1, 1080}, + /* (5760 * 2880) / 256 */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 64800, 1, 8160}, + /* ((4096 * 2176) / 256) * 60 fps */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 2088960, 1, 244800}, + {CAP_FRAMERATE, DEC, CODECS_ALL, 1, 480, 1, 30}, + + /* Encode spec */ + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + /* ((4096 * 2176) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND, ENC, CODECS_ALL, 64, 1044480, 1, 244800}, + {CAP_FRAMERATE, ENC, CODECS_ALL, 1, 240, 1, 30}, + + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 100000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 100000000, 1, 20000000}, + {CAP_SCALE_X, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_Y, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_X, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_SCALE_Y, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_BFRAME, ENC, H264|HEVC, 0, 1, 1, 0}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 2, 1, 0}, 
+ /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_I_FRAME_QP, DEC, VP9, 0, 127, 1, 20}, + {CAP_P_FRAME_QP, DEC, VP9, 0, 127, 1, 40}, + {CAP_B_FRAME_QP, DEC, VP9, 0, 127, 1, 40}, + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Mpeg2 decoder specific */ + {CAP_FRAME_WIDTH, DEC, MPEG2, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, MPEG2, 96, 1920, 1, 1088}, + /* (1920 * 1088) / 256 */ + {CAP_MBS_PER_FRAME, DEC, MPEG2, 36, 8160, 1, 8160}, + /* ((1920 * 1088) / 256) * 30*/ + {CAP_MBS_PER_SECOND, DEC, MPEG2, 36, 244800, 1, 244800}, + {CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30}, + {CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000}, + + /* Vp9 specific */ + {CAP_FRAME_WIDTH, DEC, VP9, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, VP9, 96, 4096, 1, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_MBS_PER_FRAME, DEC, VP9, 36, 34816, 1, 8160}, + /* ((4096 * 2176) / 256) * 60*/ + {CAP_MBS_PER_SECOND, DEC, VP9, 36, 2088960, 1, 244800}, + {CAP_FRAMERATE, DEC, VP9, 1, 60, 1, 60}, + {CAP_BITRATE, DEC, VP9, 1, 100000000, 1, 20000000}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_SECURE_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 40000000, 1, 20000000}, + + /* Batch Mode Decode */ + {CAP_BATCH_MAX_MB_PER_FRAME, DEC, CODECS_ALL, 36, 
8160, 1, 8160}, + /* (1920 * 1088) / 256 */ + {CAP_BATCH_MAX_FPS, DEC, CODECS_ALL, 1, 60, 1, 30}, + + /* Lossless encoding usecase specific */ + {CAP_LOSSLESS_FRAME_WIDTH, ENC, H264|HEVC, 128, 4096, 2, 1920}, + {CAP_LOSSLESS_FRAME_HEIGHT, ENC, H264|HEVC, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_LOSSLESS_MBS_PER_FRAME, ENC, H264|HEVC, 64, 34816, 1, 8160}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 120, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 16384, 2, 16384}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 16384, 2, 16384}, + + /* Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported + */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, +}; + +static struct msm_vidc_codec_capability yupik_capabilities_v1[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value,} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* 
(4096 * 2176) / 256 */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + /* ((4096 * 2176) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 1044480, 1, 244800}, + {CAP_MBS_PER_SECOND, ENC, CODECS_ALL, 64, 1044480, 1, 244800}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 240, 1, 30}, + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 100000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 100000000, 1, 20000000}, + {CAP_SCALE_X, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_Y, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_X, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_SCALE_Y, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_BFRAME, ENC, H264|HEVC, 0, 1, 1, 0}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 2, 1, 0}, + /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_I_FRAME_QP, DEC, VP9, 0, 127, 1, 20}, + {CAP_P_FRAME_QP, DEC, VP9, 0, 127, 1, 40}, + {CAP_B_FRAME_QP, DEC, VP9, 0, 127, 1, 40}, + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Mpeg2 decoder specific */ + {CAP_FRAME_WIDTH, DEC, MPEG2, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, MPEG2, 96, 1920, 1, 1088}, + /* (1920 * 1088) / 256 */ + {CAP_MBS_PER_FRAME, DEC, MPEG2, 36, 8160, 1, 8160}, + /* ((1920 * 1088) / 256) * 30*/ + {CAP_MBS_PER_SECOND, DEC, MPEG2, 36, 244800, 1, 244800}, + {CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30}, + {CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000}, + + /* Vp9 specific */ + {CAP_FRAME_WIDTH, DEC, VP9, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, VP9, 96, 4096, 1, 1080}, + /* 
(4096 * 2176) / 256 */ + {CAP_MBS_PER_FRAME, DEC, VP9, 36, 34816, 1, 8160}, + /* ((4096 * 2176) / 256) * 30*/ + {CAP_MBS_PER_SECOND, DEC, VP9, 36, 1044480, 1, 244800}, + {CAP_FRAMERATE, DEC, VP9, 1, 60, 1, 60}, + {CAP_BITRATE, DEC, VP9, 1, 100000000, 1, 20000000}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_SECURE_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 40000000, 1, 20000000}, + + /* Batch Mode Decode */ + {CAP_BATCH_MAX_MB_PER_FRAME, DEC, CODECS_ALL, 36, 8160, 1, 8160}, + /* (1920 * 1088) / 256 */ + {CAP_BATCH_MAX_FPS, DEC, CODECS_ALL, 1, 60, 1, 30}, + + /* Lossless encoding usecase specific */ + {CAP_LOSSLESS_FRAME_WIDTH, ENC, H264|HEVC, 128, 4096, 2, 1920}, + {CAP_LOSSLESS_FRAME_HEIGHT, ENC, H264|HEVC, 128, 4096, 2, 1080}, + /* (4096 * 2176)/ 256 */ + {CAP_LOSSLESS_MBS_PER_FRAME, ENC, H264|HEVC, 64, 34816, 1, 8160}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 120, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 16384, 2, 16384}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 16384, 2, 16384}, + + /* Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. 
But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported + */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, +}; + +static struct msm_vidc_codec_capability shima_capabilities_v0[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value,} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 8192, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 8192, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (8192 * 4320) / 256 */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 138240, 1, 138240}, + /* (4096 * 2176) / 256 */ + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 34816}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 480, 1, 30}, + + /* Encode spec - 4K@60 */ + /* ((3840 * 2176) / 256) * 60 fps */ + {CAP_MBS_PER_SECOND, ENC, CODECS_ALL, 64, 1958400, 1, 489600}, + + /* Decode spec - 8K@30, 4k@120*/ + /* ((8192 * 4320) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 4147200, 1, 979200}, + + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 160000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 160000000, 1, 20000000}, + {CAP_SCALE_X, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_Y, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_X, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_SCALE_Y, DEC, CODECS_ALL, 
65536, 65536, 1, 65536}, + {CAP_BFRAME, ENC, H264|HEVC, 0, 1, 1, 0}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 2, 1, 0}, + /* ((1920 * 1088) / 256) * 60 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 489600, 1, 489600}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Mpeg2 decoder specific */ + {CAP_FRAME_WIDTH, DEC, MPEG2, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, MPEG2, 96, 1920, 1, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_MBS_PER_FRAME, DEC, MPEG2, 36, 8160, 1, 8160}, + /* ((1920 * 1088) / 256) * 30*/ + {CAP_MBS_PER_SECOND, DEC, MPEG2, 36, 244800, 1, 244800}, + {CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30}, + {CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000}, + + /* VP9 decoder-specific */ + {CAP_FRAME_WIDTH, DEC, VP9, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, VP9, 96, 4096, 1, 1080}, + /* (4096 * 2304) / 256 */ + {CAP_MBS_PER_FRAME, DEC, VP9, 36, 36864, 1, 36864}, + /* ((4096 * 2304) / 256) * 60 */ + {CAP_MBS_PER_SECOND, DEC, VP9, 36, 2211840, 1, 2211840}, + {CAP_FRAMERATE, DEC, VP9, 1, 60, 1, 60}, + {CAP_BITRATE, DEC, VP9, 1, 100000000, 1, 20000000}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (3840 * 2176) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_SECURE_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 40000000, 1, 20000000}, + + /* Batch Mode Decode */ + 
{CAP_BATCH_MAX_MB_PER_FRAME, DEC, CODECS_ALL, 36, 8160, 1, 8160}, + /* (1920 * 1088) / 256 */ + {CAP_BATCH_MAX_FPS, DEC, CODECS_ALL, 1, 60, 1, 60}, + + /* Lossless encoding usecase specific */ + {CAP_LOSSLESS_FRAME_WIDTH, ENC, H264|HEVC, 128, 4096, 2, 1920}, + {CAP_LOSSLESS_FRAME_HEIGHT, ENC, H264|HEVC, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_LOSSLESS_MBS_PER_FRAME, ENC, H264|HEVC, 64, 34816, 1, 8160}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 120, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 16384, 2, 16384}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 16384, 2, 16384}, + + /* + * Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported. 
+ */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_6_0, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, +}; + +static struct msm_vidc_codec_capability shima_capabilities_v1[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value,} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* ((4096 * 2176) / 256) */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + /* ((3840 * 2176) / 256) * 60 fps */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 1958400, 1, 489600}, + {CAP_MBS_PER_SECOND, ENC, CODECS_ALL, 64, 1958400, 1, 489600}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 480, 1, 30}, + + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 100000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 100000000, 1, 20000000}, + {CAP_SCALE_X, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_Y, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_X, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_SCALE_Y, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_BFRAME, ENC, H264|HEVC, 0, 1, 1, 0}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 2, 1, 0}, + /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 
0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Mpeg2 decoder specific */ + {CAP_FRAME_WIDTH, DEC, MPEG2, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, MPEG2, 96, 1920, 1, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_MBS_PER_FRAME, DEC, MPEG2, 36, 8160, 1, 8160}, + /* ((1920 * 1088) / 256) * 30 */ + {CAP_MBS_PER_SECOND, DEC, MPEG2, 36, 244800, 1, 244800}, + {CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30}, + {CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000}, + + /* VP9 decoder-specific */ + {CAP_FRAME_WIDTH, DEC, VP9, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, VP9, 96, 4096, 1, 1080}, + /* (4096 * 2304) / 256 */ + {CAP_MBS_PER_FRAME, DEC, VP9, 36, 36864, 1, 36864}, + /* ((4096 * 2304) / 256) * 60 */ + {CAP_MBS_PER_SECOND, DEC, VP9, 36, 2211840, 1, 2211840}, + {CAP_FRAMERATE, DEC, VP9, 1, 60, 1, 60}, + {CAP_BITRATE, DEC, VP9, 1, 100000000, 1, 20000000}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_SECURE_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 40000000, 1, 20000000}, + + /* Batch Mode Decode */ + {CAP_BATCH_MAX_MB_PER_FRAME, DEC, CODECS_ALL, 36, 8160, 1, 8160}, + /* (1920 * 1088) / 256 */ + {CAP_BATCH_MAX_FPS, DEC, CODECS_ALL, 1, 30, 1, 30}, + + /* Lossless encoding usecase specific */ + {CAP_LOSSLESS_FRAME_WIDTH, ENC, H264|HEVC, 128, 3840, 2, 1920}, + {CAP_LOSSLESS_FRAME_HEIGHT, ENC, H264|HEVC, 128, 3840, 2, 1080}, + 
/* (3840 * 2176) / 256 */ + {CAP_LOSSLESS_MBS_PER_FRAME, ENC, H264|HEVC, 64, 32640, 1, 8160}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 120, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 16384, 2, 16384}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 16384, 2, 16384}, + + /* + * Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported. + */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, +}; + +static struct msm_vidc_codec_capability shima_capabilities_v2[] = { + /* {cap_type, domains, codecs, min, max, step_size, default_value,} */ + {CAP_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + /* ((3840 * 2176) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND, DEC, CODECS_ALL, 36, 979200, 1, 244800}, + {CAP_MBS_PER_SECOND, ENC, 
CODECS_ALL, 64, 979200, 1, 244800}, + {CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 240, 1, 30}, + {CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 100000000, 1, 20000000}, + {CAP_CABAC_BITRATE, ENC, H264, 1, 100000000, 1, 20000000}, + {CAP_SCALE_X, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_Y, ENC, CODECS_ALL, 8192, 65536, 1, 8192}, + {CAP_SCALE_X, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_SCALE_Y, DEC, CODECS_ALL, 65536, 65536, 1, 65536}, + {CAP_BFRAME, ENC, H264|HEVC, 0, 1, 1, 0}, + {CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0}, + {CAP_LTR_COUNT, ENC, H264|HEVC, 0, 2, 1, 0}, + /* ((1920 * 1088) / 256) * 30 fps */ + {CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL, + 0, 244800, 1, 244800}, + {CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10}, + {CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + {CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20}, + /* 10 slices */ + {CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10}, + {CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1}, + + /* Mpeg2 decoder specific */ + {CAP_FRAME_WIDTH, DEC, MPEG2, 96, 1920, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, MPEG2, 96, 1920, 1, 1080}, + /* (1920 * 1088) / 256 */ + {CAP_MBS_PER_FRAME, DEC, MPEG2, 36, 8160, 1, 8160}, + /* ((1920 * 1088) / 256) * 30*/ + {CAP_MBS_PER_SECOND, DEC, MPEG2, 36, 244800, 1, 244800}, + {CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30}, + {CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000}, + + /* VP9 decoder-specific */ + {CAP_FRAME_WIDTH, DEC, VP9, 96, 4096, 1, 1920}, + {CAP_FRAME_HEIGHT, DEC, VP9, 96, 4096, 1, 1080}, + /* (4096 * 2304) / 256 */ + {CAP_MBS_PER_FRAME, DEC, VP9, 36, 36864, 1, 36864}, + /* ((4096 * 2304) / 256) * 30 */ + {CAP_MBS_PER_SECOND, DEC, VP9, 36, 1105920, 1, 1105920}, + {CAP_FRAMERATE, DEC, VP9, 1, 60, 1, 60}, + {CAP_BITRATE, DEC, VP9, 1, 100000000, 1, 20000000}, + + /* Secure usecase specific */ + {CAP_SECURE_FRAME_WIDTH, DEC, CODECS_ALL, 96, 4096, 1, 1920}, + {CAP_SECURE_FRAME_HEIGHT, DEC, 
CODECS_ALL, 96, 4096, 1, 1080}, + {CAP_SECURE_FRAME_WIDTH, ENC, CODECS_ALL, 128, 4096, 2, 1920}, + {CAP_SECURE_FRAME_HEIGHT, ENC, CODECS_ALL, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_SECURE_MBS_PER_FRAME, DEC, CODECS_ALL, 36, 34816, 1, 8160}, + {CAP_SECURE_MBS_PER_FRAME, ENC, CODECS_ALL, 64, 34816, 1, 8160}, + {CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 40000000, 1, 20000000}, + + /* Batch Mode Decode */ + {CAP_BATCH_MAX_MB_PER_FRAME, DEC, CODECS_ALL, 36, 8160, 1, 8160}, + /* (1920 * 1088) / 256 */ + {CAP_BATCH_MAX_FPS, DEC, CODECS_ALL, 1, 30, 1, 30}, + + /* Lossless encoding usecase specific */ + {CAP_LOSSLESS_FRAME_WIDTH, ENC, H264|HEVC, 128, 4096, 2, 1920}, + {CAP_LOSSLESS_FRAME_HEIGHT, ENC, H264|HEVC, 128, 4096, 2, 1080}, + /* (4096 * 2176) / 256 */ + {CAP_LOSSLESS_MBS_PER_FRAME, ENC, H264|HEVC, 64, 34816, 1, 8160}, + + /* All intra encoding usecase specific */ + {CAP_ALLINTRA_MAX_FPS, ENC, H264|HEVC, 1, 120, 1, 30}, + + /* Image specific */ + {CAP_HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 2, 512}, + {CAP_HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 16384, 2, 16384}, + {CAP_HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 16384, 2, 16384}, + + /* + * Level for AVC and HEVC encoder specific. + * Default for levels is UNKNOWN value. But if we use unknown + * value here to set as default, max value needs to be set to + * unknown as well, which creates a problem of allowing client + * to set higher level than supported. 
+ */ + {CAP_H264_LEVEL, ENC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, ENC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, + + /* Level for AVC and HEVC decoder specific */ + {CAP_H264_LEVEL, DEC, H264, V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0}, + {CAP_HEVC_LEVEL, DEC, HEVC, V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4}, +}; + +/* Generally Iris2 VPSS only support 8 multiple encoding if + * rotation/flip is enabled, however customer can require specific + * resolution supports and expand capabilities here. + */ +static struct msm_vidc_vpss_capability vpss_capabilities[] = { + /* {supported width, supported height,} */ + {3840, 1644}, + {1644, 3840}, +}; + +/* + * Custom conversion coefficients for resolution: 176x144 negative + * coeffs are converted to s4.9 format + * (e.g. 
-22 converted to ((1 << 13) - 22) + * 3x3 transformation matrix coefficients in s4.9 fixed point format + */ +static u32 vpe_csc_custom_matrix_coeff[HAL_MAX_MATRIX_COEFFS] = { + 440, 8140, 8098, 0, 460, 52, 0, 34, 463 +}; + +/* offset coefficients in s9 fixed point format */ +static u32 vpe_csc_custom_bias_coeff[HAL_MAX_BIAS_COEFFS] = { + 53, 0, 4 +}; + +/* clamping value for Y/U/V([min,max] for Y/U/V) */ +static u32 vpe_csc_custom_limit_coeff[HAL_MAX_LIMIT_COEFFS] = { + 16, 235, 16, 240, 16, 240 +}; + +struct allowed_clock_rates_table yupik_clock_data_v1[] = { + {133330000}, {240000000}, {335000000}, {380000000} +}; + +struct allowed_clock_rates_table shima_clock_data_v0[] = { + {240000000}, {338000000}, {366000000}, {444000000} +}; + +struct allowed_clock_rates_table shima_clock_data_v2[] = { + {201600000} +}; + +static struct msm_vidc_common_data default_common_data[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, +}; + +static struct msm_vidc_common_data lahaina_common_data[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 7833600, /* + * 7680x4320@60fps, 3840x2176@240fps + * Greater than 4096x2176@120fps, + * 8192x4320@48fps + */ + }, + { + .key = "qcom,max-image-load", + .value = 1048576, /* ((16384x16384)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 173056, /* (8192x4320)/256 + (4096x2176)/256*/ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 489600, /* ((1920x1088)/256)@60fps */ + }, + { + .key = "qcom,max-b-frame-mbs-per-frame", + .value = 32640, /* 3840x2176/256 */ + }, + { + 
.key = "qcom,max-b-frame-mbs-per-sec", + .value = 1958400, /* 3840x2176/256 MBs@60fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,batch-timeout", + .value = 200, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 326389, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 44156, + }, + { + .key = "qcom,avsync-window-size", + .value = 40, + }, + { + .key = "qcom,prefetch_non_pix_buf_count", + .value = 1, + }, + { + .key = "qcom,prefetch_non_pix_buf_size", + /* + * Internal buffer size is calculated for secure decode session + * of resolution 4k (4096x2160) + * Internal buf size = calculate_scratch_size() + + * calculate_scratch1_size() + calculate_persist1_size() + * Take maximum between VP9 10bit, HEVC 10bit, AVC, MPEG2 secure + * decoder sessions + */ + .value = 209715200, + }, + { + .key = "qcom,prefetch_pix_buf_count", + .value = 18, + }, + { + .key = "qcom,prefetch_pix_buf_size", + /* + * Calculated by VENUS_BUFFER_SIZE for 4096x2160 UBWC + */ + .value = 13434880, + }, + { + .key = "qcom,ubwc_stats_in_fbd", + .value = 1, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 1, + }, + { + .key = "qcom,enc_auto_dynamic_fps", + .value = 1, + }, +}; + +static struct msm_vidc_common_data yupik_common_data_v0[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 2088960, + /* ((4096x2176)/256)@60 + * 4k@30 decode + 1080p@30 encode + */ + }, + { + .key = "qcom,max-image-load", + .value = 1048576, /* 
((16384x16384)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 139264, /* ((4096x2176)/256) x 4 */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 244800, /* ((1920x1088)/256)@30fps */ + }, + { + .key = "qcom,max-b-frame-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-b-frame-mbs-per-sec", + .value = 489600, /* ((1920x1088)/256) MBs@60fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,batch-timeout", + .value = 200, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 436000, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 166667, + }, + { + .key = "qcom,avsync-window-size", + .value = 40, + }, + { + .key = "qcom,prefetch_non_pix_buf_count", + .value = 1, + }, + { + .key = "qcom,prefetch_non_pix_buf_size", + /* + * Internal buffer size is calculated for secure decode session + * of resolution 4k (4096x2160) + * Internal buf size = calculate_scratch_size() + + * calculate_scratch1_size() + calculate_persist1_size() + * Take maximum between VP9 10bit, HEVC 10bit, AVC, MPEG2 secure + * decoder sessions + */ + .value = 209715200, + }, + { + .key = "qcom,prefetch_pix_buf_count", + .value = 18, + }, + { + .key = "qcom,prefetch_pix_buf_size", + /* + * Calculated by VENUS_BUFFER_SIZE for 4096x2160 UBWC + */ + .value = 13434880, + }, + { + .key = "qcom,ubwc_stats_in_fbd", + .value = 0, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 0, + }, + { + .key = "qcom,enc_auto_dynamic_fps", + .value = 0, + }, +}; + +static struct msm_vidc_common_data yupik_common_data_v1[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 
1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 1224000, + /* UHD@30 decode + 1080p@30 encode */ + }, + { + .key = "qcom,max-image-load", + .value = 1048576, /* ((16384x16384)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 139264, /* ((4096x2176)/256) x 4 */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 244800, /* ((1920x1088)/256)@30fps */ + }, + { + .key = "qcom,max-b-frame-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-b-frame-mbs-per-sec", + .value = 489600, /* ((1920x1088)/256) MBs@60fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,batch-timeout", + .value = 200, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 436000, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 166667, + }, + { + .key = "qcom,avsync-window-size", + .value = 40, + }, + { + .key = "qcom,prefetch_non_pix_buf_count", + .value = 1, + }, + { + .key = "qcom,prefetch_non_pix_buf_size", + /* + * Internal buffer size is calculated for secure decode session + * of resolution 4k (4096x2160) + * Internal buf size = calculate_scratch_size() + + * calculate_scratch1_size() + calculate_persist1_size() + * Take maximum between VP9 10bit, HEVC 10bit, AVC, MPEG2 secure + * decoder sessions + */ + .value = 209715200, + }, + { + .key = "qcom,prefetch_pix_buf_count", + .value = 18, + }, + { + .key = "qcom,prefetch_pix_buf_size", + /* + * Calculated by VENUS_BUFFER_SIZE for 4096x2160 UBWC + */ + .value = 13434880, + }, 
+ { + .key = "qcom,ubwc_stats_in_fbd", + .value = 0, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 0, + }, + { + .key = "qcom,enc_auto_dynamic_fps", + .value = 0, + }, +}; + +static struct msm_vidc_common_data bengal_common_data_v0[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 489600, + }, + { + .key = "qcom,max-image-load", + .value = 262144, /* ((8192x8192)/256)@1fps */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 244800, /* 1920 x 1088 @ 30 fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 733003, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 225975, + }, + { + .key = "qcom,ubwc_stats_in_fbd", + .value = 0, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 0, + }, +}; + +static struct msm_vidc_common_data bengal_common_data_v1[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 244800, + }, + { + .key = "qcom,max-image-load", + .value = 262144, /* ((8192x8192)/256)@1fps */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 244800, /* 1920 x 1088 @ 30 fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + 
.key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 733003, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 225975, + }, + { + .key = "qcom,ubwc_stats_in_fbd", + .value = 0, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 0, + }, +}; + +static struct msm_vidc_common_data shima_common_data_v0[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 3916800, + /** + * (3840x2176)/256)@120fps decode, + * (7680x4320)/256)@30fps decode + */ + }, + { + .key = "qcom,max-image-load", + .value = 1048576, /* ((16384x16384)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 138240, /* ((8192x4320)/256) */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 244800, /* ((1920x1088)/256)@30fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,batch-timeout", + .value = 200, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 326389, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 44156, + }, + { + .key = "qcom,avsync-window-size", + .value = 40, + }, + { + .key = "qcom,prefetch_non_pix_buf_count", + .value = 1, + }, + { + .key = "qcom,prefetch_non_pix_buf_size", + /* + * Internal buffer size is calculated for secure decode session + * of resolution 4k (4096x2160) + * Internal buf size = calculate_scratch_size() + + * calculate_scratch1_size() + 
calculate_persist1_size() + * Take maximum between VP9 10bit, HEVC 10bit, AVC, MPEG2 secure + * decoder sessions + */ + .value = 209715200, + }, + { + .key = "qcom,prefetch_pix_buf_count", + .value = 18, + }, + { + .key = "qcom,prefetch_pix_buf_size", + /* + * Calculated by VENUS_BUFFER_SIZE for 4096x2160 UBWC + */ + .value = 13434880, + }, + { + .key = "qcom,ubwc_stats_in_fbd", + .value = 0, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 0, + }, +}; + +static struct msm_vidc_common_data shima_common_data_v1[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 1958400, + /** + * (3840x2176)/256)@60fps decode + */ + }, + { + .key = "qcom,max-image-load", + .value = 1048576, /* ((16384x16384)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 130560, /* ((3840x2176)/256) x 4 */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 244800, /* ((1920x1088)/256)@30fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,batch-timeout", + .value = 200, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 326389, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 44156, + }, + { + .key = "qcom,avsync-window-size", + .value = 40, + }, + { + .key = "qcom,prefetch_non_pix_buf_count", + .value = 1, + }, + { + .key = "qcom,prefetch_non_pix_buf_size", + /* + * Internal buffer size is calculated for secure decode session + * of 
resolution 4k (4096x2160) + * Internal buf size = calculate_scratch_size() + + * calculate_scratch1_size() + calculate_persist1_size() + * Take maximum between VP9 10bit, HEVC 10bit, AVC, MPEG2 secure + * decoder sessions + */ + .value = 209715200, + }, + { + .key = "qcom,prefetch_pix_buf_count", + .value = 18, + }, + { + .key = "qcom,prefetch_pix_buf_size", + /* + * Calculated by VENUS_BUFFER_SIZE for 4096x2160 UBWC + */ + .value = 13434880, + }, + { + .key = "qcom,ubwc_stats_in_fbd", + .value = 0, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 0, + }, +}; + +static struct msm_vidc_common_data shima_common_data_v2[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 979200, + /** + * (3840x2176)/256)@30fps decode + */ + }, + { + .key = "qcom,max-image-load", + .value = 1048576, /* ((16384x16384)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 130560, /* ((3840x2176)/256) x 4 */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, /* ((1920x1088)/256) */ + }, + { + .key = "qcom,max-hq-mbs-per-sec", + .value = 244800, /* ((1920x1088)/256)@30fps */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,batch-timeout", + .value = 200, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 326389, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 44156, + }, + { + .key = "qcom,avsync-window-size", + .value = 40, + }, + { + .key = "qcom,prefetch_non_pix_buf_count", + .value = 1, + }, + { + .key = 
"qcom,prefetch_non_pix_buf_size", + /* + * Internal buffer size is calculated for secure decode session + * of resolution 4k (4096x2160) + * Internal buf size = calculate_scratch_size() + + * calculate_scratch1_size() + calculate_persist1_size() + * Take maximum between VP9 10bit, HEVC 10bit, AVC, MPEG2 secure + * decoder sessions + */ + .value = 209715200, + }, + { + .key = "qcom,prefetch_pix_buf_count", + .value = 18, + }, + { + .key = "qcom,prefetch_pix_buf_size", + /* + * Calculated by VENUS_BUFFER_SIZE for 4096x2160 UBWC + */ + .value = 13434880, + }, + { + .key = "qcom,ubwc_stats_in_fbd", + .value = 0, + }, + { + .key = "qcom,vpp_delay_supported", + .value = 0, + }, +}; + +static struct msm_vidc_common_data holi_common_data[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + }, + { + .key = "qcom,max-hw-load", + .value = 489600, /* ((1088x1920)/256)@60fps */ + }, + { + .key = "qcom,max-image-load", + .value = 262144, /* ((8192x8192)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 65280,/* ((3840x2176)/256) x 2 */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 733003, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 225975, + }, + { + .key = "qcom,no-cvp", + .value = 1, + }, +}; + +static struct msm_vidc_common_data scuba_common_data[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,fw-unload-delay", + .value = 1000, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 3, + 
}, + { + .key = "qcom,max-hw-load", + .value = 489600, /* ((1088x1920)/256)@60fps */ + }, + { + .key = "qcom,max-image-load", + .value = 262144, /* ((8192x8192)/256)@1fps */ + }, + { + .key = "qcom,max-mbpf", + .value = 65280,/* ((3840x2176)/256) x 2 */ + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 733003, + }, + { + .key = "qcom,fw-vpp-cycles", + .value = 225975, + }, + { + .key = "qcom,no-cvp", + .value = 1, + }, +}; + +static struct msm_vidc_efuse_data yupik_efuse_data[] = { + /* IRIS_PLL_FMAX - max 4K@30 */ + EFUSE_ENTRY(0x007801E8, 4, 0x00200000, 0x15, SKU_VERSION), +}; + +static struct msm_vidc_efuse_data shima_efuse_data[] = { + /* IRIS_4K60_FMAX_LIMIT_EFUSE - max 4K@60 */ + EFUSE_ENTRY(0x007801E0, 4, 0x00200000, 0x15, SKU_VERSION), + /* IRIS_MULTIPIPE_DISABLE - max 4K@30 */ + EFUSE_ENTRY(0x007801E0, 4, 0x00001000, 0x0B, SKU_VERSION), +}; + +/* Default UBWC config for LPDDR5 */ +static struct msm_vidc_ubwc_config_data lahaina_ubwc_data[] = { + UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 16, 0, 0), +}; + +/* Default UBWC config for LPDDR5 */ +static struct msm_vidc_ubwc_config_data yupik_ubwc_data[] = { + UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 15, 0, 0), +}; + +/* Default UBWC config for LPDDR5 */ +static struct msm_vidc_ubwc_config_data shima_ubwc_data[] = { + UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 15, 0, 0), +}; + +static struct msm_vidc_platform_data default_data = { + .codec_data = default_codec_data, + .codec_data_length = ARRAY_SIZE(default_codec_data), + .common_data = default_common_data, + .common_data_length = ARRAY_SIZE(default_common_data), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = NULL, + .efuse_data_length = 0, + 
.sku_version = 0, + .vpu_ver = VPU_VERSION_IRIS2, + .num_vpp_pipes = 0x4, + .ubwc_config = 0x0, + .max_inst_count = MAX_SUPPORTED_INSTANCES, +}; + +static struct msm_vidc_platform_data lahaina_data = { + .codec_data = lahaina_codec_data, + .codec_data_length = ARRAY_SIZE(lahaina_codec_data), + .clock_data = NULL, + .clock_data_length = 0, + .common_data = lahaina_common_data, + .common_data_length = ARRAY_SIZE(lahaina_common_data), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = NULL, + .efuse_data_length = 0, + .sku_version = 0, + .vpu_ver = VPU_VERSION_IRIS2, + .num_vpp_pipes = 0x4, + .ubwc_config = lahaina_ubwc_data, + .codecs = default_codecs, + .codecs_count = ARRAY_SIZE(default_codecs), + .codec_caps = lahaina_capabilities, + .codec_caps_count = ARRAY_SIZE(lahaina_capabilities), + .vpss_caps = vpss_capabilities, + .vpss_caps_count = ARRAY_SIZE(vpss_capabilities), + .max_inst_count = MAX_SUPPORTED_INSTANCES, +}; + +static struct msm_vidc_platform_data yupik_data = { + .codec_data = yupik_codec_data, + .codec_data_length = ARRAY_SIZE(yupik_codec_data), + .clock_data = NULL, + .clock_data_length = 0, + .common_data = yupik_common_data_v0, + .common_data_length = ARRAY_SIZE(yupik_common_data_v0), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = yupik_efuse_data, + .efuse_data_length = ARRAY_SIZE(yupik_efuse_data), + .sku_version = 0, + .vpu_ver = VPU_VERSION_IRIS2_1, + .num_vpp_pipes = 0x1, + .ubwc_config = yupik_ubwc_data, + .codecs = default_codecs, + .codecs_count = ARRAY_SIZE(default_codecs), + .codec_caps = yupik_capabilities_v0, + .codec_caps_count = ARRAY_SIZE(yupik_capabilities_v0), + .vpss_caps = 
vpss_capabilities, + .vpss_caps_count = ARRAY_SIZE(vpss_capabilities), + .max_inst_count = MAX_SUPPORTED_INSTANCES, +}; + +static struct msm_vidc_platform_data bengal_data = { + .codec_data = bengal_codec_data, + .codec_data_length = ARRAY_SIZE(bengal_codec_data), + .clock_data = NULL, + .clock_data_length = 0, + .common_data = bengal_common_data_v0, + .common_data_length = ARRAY_SIZE(bengal_common_data_v0), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = NULL, + .efuse_data_length = 0, + .sku_version = 0, + .vpu_ver = VPU_VERSION_AR50_LITE, + .num_vpp_pipes = 0x1, + .ubwc_config = 0x0, + .codecs = bengal_codecs, + .codecs_count = ARRAY_SIZE(bengal_codecs), + .codec_caps = bengal_capabilities_v0, + .codec_caps_count = ARRAY_SIZE(bengal_capabilities_v0), + .vpss_caps = NULL, + .vpss_caps_count = 0, + .max_inst_count = MAX_SUPPORTED_INSTANCES, +}; + +static struct msm_vidc_platform_data shima_data = { + .codec_data = shima_codec_data, + .codec_data_length = ARRAY_SIZE(shima_codec_data), + .clock_data = shima_clock_data_v0, + .clock_data_length = ARRAY_SIZE(shima_clock_data_v0), + .common_data = shima_common_data_v0, + .common_data_length = ARRAY_SIZE(shima_common_data_v0), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = shima_efuse_data, + .efuse_data_length = ARRAY_SIZE(shima_efuse_data), + .sku_version = 0, + .vpu_ver = VPU_VERSION_IRIS2, + .num_vpp_pipes = 0x2, + .ubwc_config = shima_ubwc_data, + .codecs = shima_codecs, + .codecs_count = ARRAY_SIZE(shima_codecs), + .codec_caps = shima_capabilities_v0, + .codec_caps_count = ARRAY_SIZE(shima_capabilities_v0), + .vpss_caps = vpss_capabilities, + .vpss_caps_count = 
ARRAY_SIZE(vpss_capabilities), + .max_inst_count = MAX_SUPPORTED_INSTANCES, +}; + +static struct msm_vidc_platform_data holi_data = { + .codec_data = holi_codec_data, + .codec_data_length = ARRAY_SIZE(holi_codec_data), + .clock_data = NULL, + .clock_data_length = 0, + .common_data = holi_common_data, + .common_data_length = ARRAY_SIZE(holi_common_data), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = NULL, + .efuse_data_length = 0, + .sku_version = 0, + .vpu_ver = VPU_VERSION_AR50_LITE, + .num_vpp_pipes = 0x1, + .ubwc_config = 0x0, + .codecs = holi_codecs, + .codecs_count = ARRAY_SIZE(holi_codecs), + .codec_caps = holi_capabilities, + .codec_caps_count = ARRAY_SIZE(holi_capabilities), + .vpss_caps = NULL, + .vpss_caps_count = 0, + .max_inst_count = MAX_SUPPORTED_INSTANCES, +}; + +static struct msm_vidc_platform_data scuba_data = { + .codec_data = scuba_codec_data, + .codec_data_length = ARRAY_SIZE(scuba_codec_data), + .clock_data = NULL, + .clock_data_length = 0, + .common_data = scuba_common_data, + .common_data_length = ARRAY_SIZE(scuba_common_data), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = NULL, + .efuse_data_length = 0, + .sku_version = 0, + .vpu_ver = VPU_VERSION_AR50_LITE, + .num_vpp_pipes = 0x1, + .ubwc_config = 0x0, + .codecs = scuba_codecs, + .codecs_count = ARRAY_SIZE(scuba_codecs), + .codec_caps = scuba_capabilities, + .codec_caps_count = ARRAY_SIZE(scuba_capabilities), + .vpss_caps = NULL, + .vpss_caps_count = 0, +}; + +static const struct of_device_id msm_vidc_dt_device[] = { + { + .compatible = "qcom,lahaina-vidc", + .data = &lahaina_data, + }, + { + .compatible = "qcom,shima-vidc", + .data = 
&shima_data, + }, + { + .compatible = "qcom,bengal-vidc", + .data = &bengal_data, + }, + { + .compatible = "qcom,scuba-vidc", + .data = &scuba_data, + }, + { + .compatible = "qcom,holi-vidc", + .data = &holi_data, + }, + { + .compatible = "qcom,yupik-vidc", + .data = &yupik_data, + }, + { + .compatible = "qcom,qcm6490-vidc", + .data = &yupik_data, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, msm_vidc_dt_device); + +static int msm_vidc_read_efuse( + struct msm_vidc_platform_data *data, struct device *dev) +{ + void __iomem *base; + uint32_t i; + u32 efuse = 0; + struct msm_vidc_efuse_data *efuse_data = data->efuse_data; + uint32_t efuse_data_count = data->efuse_data_length; + + if (!efuse_data) + return 0; + + for (i = 0; i < efuse_data_count; i++) { + switch ((efuse_data[i]).purpose) { + case SKU_VERSION: + base = devm_ioremap(dev, (efuse_data[i]).start_address, + (efuse_data[i]).size); + if (!base) { + d_vpr_e("failed efuse: start %#x, size %d\n", + (efuse_data[i]).start_address, + (efuse_data[i]).size); + return -EINVAL; + } else { + efuse = readl_relaxed(base); + data->sku_version = + (efuse & (efuse_data[i]).mask) >> + (efuse_data[i]).shift; + devm_iounmap(dev, base); + } + break; + default: + break; + } + if (data->sku_version) { + d_vpr_h("efuse 0x%x, platform version 0x%x\n", + efuse, data->sku_version); + break; + } + } + return 0; +} + +static int msm_vidc_read_rank( + struct msm_vidc_platform_data *data, struct device *dev) +{ + uint32_t num_ranks; + + num_ranks = 0; //TO-DO Read Rank API to be added + data->sku_version = SKU_VERSION_0; + + if (num_ranks == 1) + data->sku_version = SKU_VERSION_1; + + return 0; +} + +static inline void msm_vidc_ddr_ubwc_config( + struct msm_vidc_platform_data *driver_data, u32 hbb_override_val) +{ + uint32_t ddr_type = DDR_TYPE_LPDDR5; + + ddr_type = of_fdt_get_ddrtype(); + if (ddr_type == -ENOENT) { + d_vpr_e("Failed to get ddr type, use LPDDR5\n"); + } + + if (driver_data->ubwc_config && + (ddr_type == DDR_TYPE_LPDDR4 || + 
ddr_type == DDR_TYPE_LPDDR4X)) + driver_data->ubwc_config->highest_bank_bit = hbb_override_val; + + d_vpr_h("DDR Type 0x%x hbb 0x%x\n", + ddr_type, driver_data->ubwc_config ? + driver_data->ubwc_config->highest_bank_bit : -1); +} + +void *vidc_get_drv_data(struct device *dev) +{ + struct msm_vidc_platform_data *driver_data = NULL; + const struct of_device_id *match; + int rc = 0; + + if (!IS_ENABLED(CONFIG_OF) || !dev->of_node) { + d_vpr_e("Using default_data\n"); + driver_data = &default_data; + goto exit; + } + + match = of_match_node(msm_vidc_dt_device, dev->of_node); + + if (match) + driver_data = (struct msm_vidc_platform_data *)match->data; + + if (!driver_data) + goto exit; + + /* Check for sku version */ + rc = msm_vidc_read_efuse(driver_data, dev); + if (rc) + goto exit; + + if (!strcmp(match->compatible, "qcom,lahaina-vidc")) { + msm_vidc_ddr_ubwc_config(driver_data, 0xf); + } else if (!strcmp(match->compatible, "qcom,bengal-vidc")) { + rc = msm_vidc_read_rank(driver_data, dev); + if (rc) { + d_vpr_e("Failed to get ddr rank, use Dual Rank DDR\n"); + goto exit; + } + if (driver_data->sku_version == SKU_VERSION_1) { + driver_data->common_data = bengal_common_data_v1; + driver_data->common_data_length = + ARRAY_SIZE(bengal_common_data_v1); + driver_data->codec_caps = bengal_capabilities_v1; + driver_data->codec_caps_count = + ARRAY_SIZE(bengal_capabilities_v1); + } + } else if (!strcmp(match->compatible, "qcom,shima-vidc")) { + if (driver_data->sku_version == SKU_VERSION_1) { + driver_data->clock_data = NULL; + driver_data->clock_data_length = 0; + driver_data->common_data = shima_common_data_v1; + driver_data->common_data_length = + ARRAY_SIZE(shima_common_data_v1); + driver_data->codec_caps = shima_capabilities_v1; + driver_data->codec_caps_count = + ARRAY_SIZE(shima_capabilities_v1); + } else if (driver_data->sku_version == SKU_VERSION_2) { + driver_data->clock_data = shima_clock_data_v2; + driver_data->clock_data_length = + 
ARRAY_SIZE(shima_clock_data_v2); + driver_data->common_data = shima_common_data_v2; + driver_data->common_data_length = + ARRAY_SIZE(shima_common_data_v2); + driver_data->codec_caps = shima_capabilities_v2; + driver_data->codec_caps_count = + ARRAY_SIZE(shima_capabilities_v2); + } + msm_vidc_ddr_ubwc_config(driver_data, 0xe); + } else if (!strcmp(match->compatible, "qcom,yupik-vidc")) { + if (driver_data->sku_version == SKU_VERSION_1) { + driver_data->clock_data = yupik_clock_data_v1; + driver_data->clock_data_length = + ARRAY_SIZE(yupik_clock_data_v1);; + driver_data->common_data = yupik_common_data_v1; + driver_data->common_data_length = + ARRAY_SIZE(yupik_common_data_v1); + driver_data->codec_caps = yupik_capabilities_v1; + driver_data->codec_caps_count = + ARRAY_SIZE(yupik_capabilities_v1); + } + msm_vidc_ddr_ubwc_config(driver_data, 0xe); + } else if (!strcmp(match->compatible, "qcom,qcm6490-vidc")) { + if (driver_data->sku_version == SKU_VERSION_1) { + driver_data->clock_data = yupik_clock_data_v1; + driver_data->clock_data_length = + ARRAY_SIZE(yupik_clock_data_v1); + driver_data->common_data = yupik_common_data_v1; + driver_data->common_data_length = + ARRAY_SIZE(yupik_common_data_v1); + driver_data->codec_caps = yupik_capabilities_v1; + driver_data->codec_caps_count = + ARRAY_SIZE(yupik_capabilities_v1); + } + msm_vidc_ddr_ubwc_config(driver_data, 0xe); + driver_data->max_inst_count = MAX_SUPPORTED_INSTANCES_24; + } +exit: + return driver_data; +} diff --git a/techpack/video/msm/vidc/msm_vidc_res_parse.c b/techpack/video/msm/vidc/msm_vidc_res_parse.c new file mode 100644 index 000000000000..b9dc0d4b6450 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_res_parse.c @@ -0,0 +1,1287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "msm_vidc_debug.h" +#include "msm_vidc_resources.h" +#include "msm_vidc_res_parse.h" + +enum clock_properties { + CLOCK_PROP_HAS_SCALING = 1 << 0, + CLOCK_PROP_HAS_MEM_RETENTION = 1 << 1, +}; + +static struct memory_limit_table memory_limit_tbl_mbytes[] = { + /* target_memory_size - max_video_cap */ + {12288, 4096}, /* 12 GB - 4 Gb*/ + {8192, 3584}, /* 8 GB - 3.5 Gb*/ + {6144, 2560}, /* 6 GB - 2.5 Gb*/ + {4096, 1536}, /* 4 GB - 1.5 Gb*/ + {2048, 768}, /* 2 GB - 0.75 Gb*/ +}; + +static inline struct device *msm_iommu_get_ctx(const char *ctx_name) +{ + return NULL; +} + +static int msm_vidc_populate_legacy_context_bank( + struct msm_vidc_platform_resources *res); + +static size_t get_u32_array_num_elements(struct device_node *np, + char *name) +{ + int len; + size_t num_elements = 0; + + if (!of_get_property(np, name, &len)) { + d_vpr_e("Failed to read %s from device tree\n", name); + goto fail_read; + } + + num_elements = len / sizeof(u32); + if (num_elements <= 0) { + d_vpr_e("%s not specified in device tree\n", name); + goto fail_read; + } + return num_elements; + +fail_read: + return 0; +} + +static inline void msm_vidc_free_allowed_clocks_table( + struct msm_vidc_platform_resources *res) +{ + res->allowed_clks_tbl = NULL; +} + +static inline void msm_vidc_free_cycles_per_mb_table( + struct msm_vidc_platform_resources *res) +{ + res->clock_freq_tbl.clk_prof_entries = NULL; +} + +static inline void msm_vidc_free_reg_table( + struct msm_vidc_platform_resources *res) +{ + res->reg_set.reg_tbl = NULL; +} + +static inline void msm_vidc_free_qdss_addr_table( + struct msm_vidc_platform_resources *res) +{ + res->qdss_addr_set.addr_tbl = NULL; +} + +static inline void msm_vidc_free_bus_table( + struct msm_vidc_platform_resources *res) +{ + res->bus_set.bus_tbl = NULL; + res->bus_set.count = 0; +} + +static inline void msm_vidc_free_buffer_usage_table( + struct msm_vidc_platform_resources *res) +{ + 
res->buffer_usage_set.buffer_usage_tbl = NULL; +} + +static inline void msm_vidc_free_regulator_table( + struct msm_vidc_platform_resources *res) +{ + int c = 0; + + for (c = 0; c < res->regulator_set.count; ++c) { + struct regulator_info *rinfo = + &res->regulator_set.regulator_tbl[c]; + + rinfo->name = NULL; + } + + res->regulator_set.regulator_tbl = NULL; + res->regulator_set.count = 0; +} + +static inline void msm_vidc_free_clock_table( + struct msm_vidc_platform_resources *res) +{ + res->clock_set.clock_tbl = NULL; + res->clock_set.count = 0; +} + +static inline void msm_vidc_free_cx_ipeak_context( + struct msm_vidc_platform_resources *res) +{ + cx_ipeak_unregister(res->cx_ipeak_context); + res->cx_ipeak_context = NULL; +} + +void msm_vidc_free_platform_resources( + struct msm_vidc_platform_resources *res) +{ + msm_vidc_free_clock_table(res); + msm_vidc_free_regulator_table(res); + msm_vidc_free_allowed_clocks_table(res); + msm_vidc_free_reg_table(res); + msm_vidc_free_qdss_addr_table(res); + msm_vidc_free_bus_table(res); + msm_vidc_free_buffer_usage_table(res); + msm_vidc_free_cx_ipeak_context(res); +} + +static int msm_vidc_load_fw_name(struct msm_vidc_platform_resources *res) +{ + struct platform_device *pdev = res->pdev; + + return of_property_read_string_index(pdev->dev.of_node, + "vidc,firmware-name", 0, &res->fw_name); +} + +static int msm_vidc_load_reg_table(struct msm_vidc_platform_resources *res) +{ + struct reg_set *reg_set; + struct platform_device *pdev = res->pdev; + int i; + int rc = 0; + + if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) { + /* + * qcom,reg-presets is an optional property. 
It likely won't be + * present if we don't have any register settings to program + */ + d_vpr_h("reg-presets not found\n"); + return 0; + } + + reg_set = &res->reg_set; + reg_set->count = get_u32_array_num_elements(pdev->dev.of_node, + "qcom,reg-presets"); + reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32); + + if (!reg_set->count) { + d_vpr_h("no elements in reg set\n"); + return rc; + } + + reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count * + sizeof(*(reg_set->reg_tbl)), GFP_KERNEL); + if (!reg_set->reg_tbl) { + d_vpr_e("%s: Failed to alloc register table\n", __func__); + return -ENOMEM; + } + + if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets", + (u32 *)reg_set->reg_tbl, reg_set->count * 3)) { + d_vpr_e("Failed to read register table\n"); + msm_vidc_free_reg_table(res); + return -EINVAL; + } + for (i = 0; i < reg_set->count; i++) { + d_vpr_h("reg = %#x, value = %#x, mask = %#x\n", + reg_set->reg_tbl[i].reg, reg_set->reg_tbl[i].value, + reg_set->reg_tbl[i].mask); + } + return rc; +} +static int msm_vidc_load_qdss_table(struct msm_vidc_platform_resources *res) +{ + struct addr_set *qdss_addr_set; + struct platform_device *pdev = res->pdev; + int i; + int rc = 0; + + if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) { + /* + * qcom,qdss-presets is an optional property. 
It likely won't be + * present if we don't have any register settings to program + */ + d_vpr_h("qdss-presets not found\n"); + return rc; + } + + qdss_addr_set = &res->qdss_addr_set; + qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node, + "qcom,qdss-presets"); + qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32); + + if (!qdss_addr_set->count) { + d_vpr_h("no elements in qdss reg set\n"); + return rc; + } + + qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev, + qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl), + GFP_KERNEL); + if (!qdss_addr_set->addr_tbl) { + d_vpr_e("%s: Failed to alloc register table\n", __func__); + rc = -ENOMEM; + goto err_qdss_addr_tbl; + } + + rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets", + (u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2); + if (rc) { + d_vpr_e("Failed to read qdss address table\n"); + msm_vidc_free_qdss_addr_table(res); + rc = -EINVAL; + goto err_qdss_addr_tbl; + } + + for (i = 0; i < qdss_addr_set->count; i++) { + d_vpr_h("qdss addr = %x, value = %x\n", + qdss_addr_set->addr_tbl[i].start, + qdss_addr_set->addr_tbl[i].size); + } +err_qdss_addr_tbl: + return rc; +} + +static int msm_vidc_load_subcache_info(struct msm_vidc_platform_resources *res) +{ + int rc = 0, num_subcaches = 0, c; + struct platform_device *pdev = res->pdev; + struct subcache_set *subcaches = &res->subcache_set; + + num_subcaches = of_property_count_strings(pdev->dev.of_node, + "cache-slice-names"); + if (num_subcaches <= 0) { + d_vpr_h("No subcaches found\n"); + goto err_load_subcache_table_fail; + } + + subcaches->subcache_tbl = devm_kzalloc(&pdev->dev, + sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL); + if (!subcaches->subcache_tbl) { + d_vpr_e("Failed to allocate memory for subcache tbl\n"); + rc = -ENOMEM; + goto err_load_subcache_table_fail; + } + + subcaches->count = num_subcaches; + d_vpr_h("Found %d subcaches\n", num_subcaches); + + for (c = 0; c < 
num_subcaches; ++c) { + struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c]; + + of_property_read_string_index(pdev->dev.of_node, + "cache-slice-names", c, &vsc->name); + } + + res->sys_cache_present = true; + + return 0; + +err_load_subcache_table_fail: + res->sys_cache_present = false; + subcaches->count = 0; + subcaches->subcache_tbl = NULL; + + return rc; +} + +/** + * msm_vidc_load_u32_table() - load dtsi table entries + * @pdev: A pointer to the platform device. + * @of_node: A pointer to the device node. + * @table_name: A pointer to the dtsi table entry name. + * @struct_size: The size of the structure which is nothing but + * a single entry in the dtsi table. + * @table: A pointer to the table pointer which needs to be + * filled by the dtsi table entries. + * @num_elements: Number of elements pointer which needs to be filled + * with the number of elements in the table. + * + * This is a generic implementation to load single or multiple array + * table from dtsi. The array elements should be of size equal to u32. + * + * Return: Return '0' for success else appropriate error value. 
+ */ +int msm_vidc_load_u32_table(struct platform_device *pdev, + struct device_node *of_node, char *table_name, int struct_size, + u32 **table, u32 *num_elements) +{ + int rc = 0, num_elemts = 0; + u32 *ptbl = NULL; + + if (!of_find_property(of_node, table_name, NULL)) { + d_vpr_h("%s not found\n", table_name); + return 0; + } + + num_elemts = get_u32_array_num_elements(of_node, table_name); + if (!num_elemts) { + d_vpr_e("no elements in %s\n", table_name); + return 0; + } + num_elemts /= struct_size / sizeof(u32); + + ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL); + if (!ptbl) { + d_vpr_e("Failed to alloc table %s\n", table_name); + return -ENOMEM; + } + + if (of_property_read_u32_array(of_node, table_name, ptbl, + num_elemts * struct_size / sizeof(u32))) { + d_vpr_e("Failed to read %s\n", table_name); + return -EINVAL; + } + + *table = ptbl; + if (num_elements) + *num_elements = num_elemts; + + return rc; +} +EXPORT_SYMBOL(msm_vidc_load_u32_table); + +/* A comparator to compare loads (needed later on) */ +static int cmp(const void *a, const void *b) +{ + /* want to sort in reverse so flip the comparison */ + return ((struct allowed_clock_rates_table *)b)->clock_rate - + ((struct allowed_clock_rates_table *)a)->clock_rate; +} + +static int msm_vidc_load_allowed_clocks_table( + struct msm_vidc_platform_resources *res) +{ + int rc = 0; + struct platform_device *pdev = res->pdev; + + if (res->allowed_clks_tbl) { + d_vpr_h("allowed-clock-rates populated from platform_data\n"); + goto exit; + } + + if (!of_find_property(pdev->dev.of_node, + "qcom,allowed-clock-rates", NULL)) { + d_vpr_h("allowed-clock-rates not found\n"); + return 0; + } + + rc = msm_vidc_load_u32_table(pdev, pdev->dev.of_node, + "qcom,allowed-clock-rates", + sizeof(*res->allowed_clks_tbl), + (u32 **)&res->allowed_clks_tbl, + &res->allowed_clks_tbl_size); + if (rc) { + d_vpr_e("%s: failed to read allowed clocks table\n", __func__); + return rc; + } +exit: + 
sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size, + sizeof(*res->allowed_clks_tbl), cmp, NULL); + + return 0; +} + +static int msm_vidc_populate_mem_cdsp(struct device *dev, + struct msm_vidc_platform_resources *res) +{ + res->mem_cdsp.dev = dev; + + return 0; +} + +static int msm_vidc_load_bus_table(struct msm_vidc_platform_resources *res) +{ + struct bus_set *buses = &res->bus_set; + int c = 0, num_buses = 0, rc = 0; + u32 *bus_ranges = NULL; + struct platform_device *pdev = res->pdev; + + num_buses = of_property_count_strings(pdev->dev.of_node, + "interconnect-names"); + if (num_buses <= 0) { + d_vpr_e("No buses found\n"); + return -EINVAL; + } + + buses->count = num_buses; + d_vpr_h("Found %d bus interconnects\n", num_buses); + + bus_ranges = kzalloc(2 * num_buses * sizeof(*bus_ranges), GFP_KERNEL); + if (!bus_ranges) { + d_vpr_e("No memory to read bus ranges\n"); + return -ENOMEM; + } + + rc = of_property_read_u32_array(pdev->dev.of_node, + "qcom,bus-range-kbps", bus_ranges, + num_buses * 2); + if (rc) { + d_vpr_e( + "Failed to read bus ranges: defaulting to <0 INT_MAX>\n"); + for (c = 0; c < num_buses; c++) { + bus_ranges[c * 2] = 0; + bus_ranges[c * 2 + 1] = INT_MAX; + } + } + + buses->bus_tbl = devm_kzalloc(&pdev->dev, num_buses * + sizeof(*buses->bus_tbl), GFP_KERNEL); + if (!buses->bus_tbl) { + d_vpr_e("No memory for bus table\n"); + rc = -ENOMEM; + goto exit; + } + + for (c = 0; c < num_buses; c++) { + struct bus_info *bus = &res->bus_set.bus_tbl[c]; + + of_property_read_string_index(pdev->dev.of_node, + "interconnect-names", c, &bus->name); + + bus->dev = &pdev->dev; + bus->range[0] = bus_ranges[c * 2]; + bus->range[1] = bus_ranges[c * 2 + 1]; + + d_vpr_h("Found bus %s\n", bus->name); + } + +exit: + kfree(bus_ranges); + return rc; +} + +static int msm_vidc_load_buffer_usage_table( + struct msm_vidc_platform_resources *res) +{ + int rc = 0; + struct platform_device *pdev = res->pdev; + struct buffer_usage_set *buffer_usage_set = 
&res->buffer_usage_set; + + if (!of_find_property(pdev->dev.of_node, + "qcom,buffer-type-tz-usage-table", NULL)) { + /* + * qcom,buffer-type-tz-usage-table is an optional property. It + * likely won't be present if the core doesn't support content + * protection + */ + d_vpr_h("buffer-type-tz-usage-table not found\n"); + return 0; + } + + buffer_usage_set->count = get_u32_array_num_elements( + pdev->dev.of_node, "qcom,buffer-type-tz-usage-table"); + buffer_usage_set->count /= + sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32); + if (!buffer_usage_set->count) { + d_vpr_h("no elements in buffer usage set\n"); + return 0; + } + + buffer_usage_set->buffer_usage_tbl = devm_kzalloc(&pdev->dev, + buffer_usage_set->count * + sizeof(*buffer_usage_set->buffer_usage_tbl), + GFP_KERNEL); + if (!buffer_usage_set->buffer_usage_tbl) { + d_vpr_e("%s: Failed to alloc buffer usage table\n", + __func__); + rc = -ENOMEM; + goto err_load_buf_usage; + } + + rc = of_property_read_u32_array(pdev->dev.of_node, + "qcom,buffer-type-tz-usage-table", + (u32 *)buffer_usage_set->buffer_usage_tbl, + buffer_usage_set->count * + sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32)); + if (rc) { + d_vpr_e("Failed to read buffer usage table\n"); + goto err_load_buf_usage; + } + + return 0; +err_load_buf_usage: + msm_vidc_free_buffer_usage_table(res); + return rc; +} + +static int msm_vidc_load_regulator_table( + struct msm_vidc_platform_resources *res) +{ + int rc = 0; + struct platform_device *pdev = res->pdev; + struct regulator_set *regulators = &res->regulator_set; + struct device_node *domains_parent_node = NULL; + struct property *domains_property = NULL; + int reg_count = 0; + + regulators->count = 0; + regulators->regulator_tbl = NULL; + + domains_parent_node = pdev->dev.of_node; + for_each_property_of_node(domains_parent_node, domains_property) { + const char *search_string = "-supply"; + char *supply; + bool matched = false; + + /* check if current property is possibly a 
regulator */ + supply = strnstr(domains_property->name, search_string, + strlen(domains_property->name) + 1); + matched = supply && (*(supply + strlen(search_string)) == '\0'); + if (!matched) + continue; + + reg_count++; + } + + regulators->regulator_tbl = devm_kzalloc(&pdev->dev, + sizeof(*regulators->regulator_tbl) * + reg_count, GFP_KERNEL); + + if (!regulators->regulator_tbl) { + rc = -ENOMEM; + d_vpr_e("Failed to alloc memory for regulator table\n"); + goto err_reg_tbl_alloc; + } + + for_each_property_of_node(domains_parent_node, domains_property) { + const char *search_string = "-supply"; + char *supply; + bool matched = false; + struct device_node *regulator_node = NULL; + struct regulator_info *rinfo = NULL; + + /* check if current property is possibly a regulator */ + supply = strnstr(domains_property->name, search_string, + strlen(domains_property->name) + 1); + matched = supply && (supply[strlen(search_string)] == '\0'); + if (!matched) + continue; + + /* make sure prop isn't being misused */ + regulator_node = of_parse_phandle(domains_parent_node, + domains_property->name, 0); + if (IS_ERR(regulator_node)) { + d_vpr_e("%s is not a phandle\n", + domains_property->name); + continue; + } + regulators->count++; + + /* populate regulator info */ + rinfo = ®ulators->regulator_tbl[regulators->count - 1]; + rinfo->name = devm_kzalloc(&pdev->dev, + (supply - domains_property->name) + 1, GFP_KERNEL); + if (!rinfo->name) { + rc = -ENOMEM; + d_vpr_e("Failed to alloc memory for regulator name\n"); + goto err_reg_name_alloc; + } + strlcpy(rinfo->name, domains_property->name, + (supply - domains_property->name) + 1); + + rinfo->has_hw_power_collapse = of_property_read_bool( + regulator_node, "qcom,support-hw-trigger"); + + d_vpr_h("Found regulator %s: h/w collapse = %s\n", + rinfo->name, + rinfo->has_hw_power_collapse ? 
"yes" : "no"); + } + + if (!regulators->count) + d_vpr_h("No regulators found"); + + return 0; + +err_reg_name_alloc: +err_reg_tbl_alloc: + msm_vidc_free_regulator_table(res); + return rc; +} + +static int msm_vidc_load_clock_table( + struct msm_vidc_platform_resources *res) +{ + int rc = 0, num_clocks = 0, c = 0; + struct platform_device *pdev = res->pdev; + int *clock_props = NULL; + struct clock_set *clocks = &res->clock_set; + + num_clocks = of_property_count_strings(pdev->dev.of_node, + "clock-names"); + if (num_clocks <= 0) { + d_vpr_h("No clocks found\n"); + clocks->count = 0; + rc = 0; + goto err_load_clk_table_fail; + } + + clock_props = devm_kzalloc(&pdev->dev, num_clocks * + sizeof(*clock_props), GFP_KERNEL); + if (!clock_props) { + d_vpr_e("No memory to read clock properties\n"); + rc = -ENOMEM; + goto err_load_clk_table_fail; + } + + rc = of_property_read_u32_array(pdev->dev.of_node, + "qcom,clock-configs", clock_props, + num_clocks); + if (rc) { + d_vpr_e("Failed to read clock properties: %d\n", rc); + goto err_load_clk_prop_fail; + } + + clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl) + * num_clocks, GFP_KERNEL); + if (!clocks->clock_tbl) { + d_vpr_e("Failed to allocate memory for clock tbl\n"); + rc = -ENOMEM; + goto err_load_clk_prop_fail; + } + + clocks->count = num_clocks; + d_vpr_h("Found %d clocks\n", num_clocks); + + for (c = 0; c < num_clocks; ++c) { + struct clock_info *vc = &res->clock_set.clock_tbl[c]; + + of_property_read_string_index(pdev->dev.of_node, + "clock-names", c, &vc->name); + + if (clock_props[c] & CLOCK_PROP_HAS_SCALING) { + vc->has_scaling = true; + } else { + vc->has_scaling = false; + } + + if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION) + vc->has_mem_retention = true; + else + vc->has_mem_retention = false; + + d_vpr_h("Found clock %s: scale-able = %s\n", vc->name, + vc->has_scaling ? 
"yes" : "no"); + } + + + return 0; + +err_load_clk_prop_fail: +err_load_clk_table_fail: + return rc; +} + +static int msm_vidc_load_reset_table( + struct msm_vidc_platform_resources *res) +{ + struct platform_device *pdev = res->pdev; + struct reset_set *rst = &res->reset_set; + int num_clocks = 0, c = 0; + + num_clocks = of_property_count_strings(pdev->dev.of_node, + "reset-names"); + if (num_clocks <= 0) { + d_vpr_h("No reset clocks found\n"); + rst->count = 0; + return 0; + } + + rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks, + sizeof(*rst->reset_tbl), GFP_KERNEL); + if (!rst->reset_tbl) + return -ENOMEM; + + rst->count = num_clocks; + d_vpr_h("Found %d reset clocks\n", num_clocks); + + for (c = 0; c < num_clocks; ++c) { + struct reset_info *rc = &res->reset_set.reset_tbl[c]; + + of_property_read_string_index(pdev->dev.of_node, + "reset-names", c, &rc->name); + } + + return 0; +} + +static int find_key_value(struct msm_vidc_platform_data *platform_data, + const char *key) +{ + int i = 0; + struct msm_vidc_common_data *common_data = platform_data->common_data; + int size = platform_data->common_data_length; + + for (i = 0; i < size; i++) { + if (!strcmp(common_data[i].key, key)) + return common_data[i].value; + } + return 0; +} + +int read_platform_resources_from_drv_data( + struct msm_vidc_core *core) +{ + struct msm_vidc_platform_data *platform_data; + struct msm_vidc_platform_resources *res; + int rc = 0; + + if (!core || !core->platform_data) { + d_vpr_e("%s: Invalid data\n", __func__); + return -ENOENT; + } + platform_data = core->platform_data; + res = &core->resources; + + res->codecs = platform_data->codecs; + res->codecs_count = platform_data->codecs_count; + res->codec_caps = platform_data->codec_caps; + res->codec_caps_count = platform_data->codec_caps_count; + res->codec_data_count = platform_data->codec_data_length; + res->codec_data = platform_data->codec_data; + res->allowed_clks_tbl = platform_data->clock_data; + res->allowed_clks_tbl_size 
= platform_data->clock_data_length; + res->vpss_caps = platform_data->vpss_caps; + res->vpss_caps_count = platform_data->vpss_caps_count; + + res->sku_version = platform_data->sku_version; + res->mem_limit_tbl = memory_limit_tbl_mbytes; + res->memory_limit_table_size = + ARRAY_SIZE(memory_limit_tbl_mbytes); + + res->max_load = find_key_value(platform_data, + "qcom,max-hw-load"); + + res->max_image_load = find_key_value(platform_data, + "qcom,max-image-load"); + + res->max_mbpf = find_key_value(platform_data, + "qcom,max-mbpf"); + + res->max_hq_mbs_per_frame = find_key_value(platform_data, + "qcom,max-hq-mbs-per-frame"); + + res->max_hq_mbs_per_sec = find_key_value(platform_data, + "qcom,max-hq-mbs-per-sec"); + + res->max_bframe_mbs_per_frame = find_key_value(platform_data, + "qcom,max-b-frame-mbs-per-frame"); + + res->max_bframe_mbs_per_sec = find_key_value(platform_data, + "qcom,max-b-frame-mbs-per-sec"); + + res->sw_power_collapsible = find_key_value(platform_data, + "qcom,sw-power-collapse"); + + res->never_unload_fw = find_key_value(platform_data, + "qcom,never-unload-fw"); + + res->debug_timeout = find_key_value(platform_data, + "qcom,debug-timeout"); + + res->max_secure_inst_count = find_key_value(platform_data, + "qcom,max-secure-instances"); + + res->prefetch_pix_buf_count = find_key_value(platform_data, + "qcom,prefetch_pix_buf_count"); + res->prefetch_pix_buf_size = find_key_value(platform_data, + "qcom,prefetch_pix_buf_size"); + res->prefetch_non_pix_buf_count = find_key_value(platform_data, + "qcom,prefetch_non_pix_buf_count"); + res->prefetch_non_pix_buf_size = find_key_value(platform_data, + "qcom,prefetch_non_pix_buf_size"); + + res->slave_side_cp = find_key_value(platform_data, + "qcom,slave-side-cp"); + res->thermal_mitigable = find_key_value(platform_data, + "qcom,enable-thermal-mitigation"); + res->msm_vidc_pwr_collapse_delay = find_key_value(platform_data, + "qcom,power-collapse-delay"); + res->msm_vidc_firmware_unload_delay = 
find_key_value(platform_data, + "qcom,fw-unload-delay"); + res->msm_vidc_hw_rsp_timeout = find_key_value(platform_data, + "qcom,hw-resp-timeout"); + res->non_fatal_pagefaults = find_key_value(platform_data, + "qcom,domain-attr-non-fatal-faults"); + res->cache_pagetables = find_key_value(platform_data, + "qcom,domain-attr-cache-pagetables"); + res->decode_batching = find_key_value(platform_data, + "qcom,decode-batching"); + res->batch_timeout = find_key_value(platform_data, + "qcom,batch-timeout"); + res->dcvs = find_key_value(platform_data, + "qcom,dcvs"); + res->fw_cycles = find_key_value(platform_data, + "qcom,fw-cycles"); + res->fw_vpp_cycles = find_key_value(platform_data, + "qcom,fw-vpp-cycles"); + res->avsync_window_size = find_key_value(platform_data, + "qcom,avsync-window-size"); + res->ubwc_stats_in_fbd = find_key_value(platform_data, + "qcom,ubwc_stats_in_fbd"); + res->has_vpp_delay = find_key_value(platform_data, + "qcom,vpp_delay_supported"); + res->enc_auto_dynamic_fps = find_key_value(platform_data, + "qcom,enc_auto_dynamic_fps"); + res->no_cvp = find_key_value(platform_data, + "qcom,no-cvp"); + + res->csc_coeff_data = &platform_data->csc_data; + + res->vpu_ver = platform_data->vpu_ver; + res->ubwc_config = platform_data->ubwc_config; + res->max_inst_count = platform_data->max_inst_count; + + return rc; + +} + +static int msm_vidc_populate_cx_ipeak_context( + struct msm_vidc_platform_resources *res) +{ + struct platform_device *pdev = res->pdev; + int rc = 0; + + if (of_find_property(pdev->dev.of_node, + "qcom,cx-ipeak-data", NULL)) + res->cx_ipeak_context = cx_ipeak_register( + pdev->dev.of_node, "qcom,cx-ipeak-data"); + else + return rc; + + if (IS_ERR(res->cx_ipeak_context)) { + rc = PTR_ERR(res->cx_ipeak_context); + if (rc == -EPROBE_DEFER) + d_vpr_h("cx-ipeak register failed. Deferring probe!"); + else + d_vpr_e("cx-ipeak register failed. 
rc: %d", rc); + + res->cx_ipeak_context = NULL; + return rc; + } + + if (res->cx_ipeak_context) + d_vpr_h("cx-ipeak register successful"); + else + d_vpr_h("cx-ipeak register not implemented"); + + of_property_read_u32(pdev->dev.of_node, + "qcom,clock-freq-threshold", + &res->clk_freq_threshold); + d_vpr_h("cx ipeak threshold frequency = %u\n", + res->clk_freq_threshold); + + return rc; +} + +int read_platform_resources_from_dt( + struct msm_vidc_platform_resources *res) +{ + struct platform_device *pdev = res->pdev; + struct resource *kres = NULL; + int rc = 0; + uint32_t firmware_base = 0; + + if (!pdev->dev.of_node) { + d_vpr_e("DT node not found\n"); + return -ENOENT; + } + + INIT_LIST_HEAD(&res->context_banks); + + res->firmware_base = (phys_addr_t)firmware_base; + + kres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res->register_base = kres ? kres->start : -1; + res->register_size = kres ? (kres->end + 1 - kres->start) : -1; + + kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + res->irq = kres ? 
kres->start : -1; + + rc = msm_vidc_load_fw_name(res); + if (rc) + d_vpr_e("%s: failed to load fw name, rc %d, using default fw\n", + __func__, rc); + + rc = msm_vidc_load_subcache_info(res); + if (rc) + d_vpr_e("Failed to load subcache info: %d\n", rc); + + rc = msm_vidc_load_qdss_table(res); + if (rc) + d_vpr_e("Failed to load qdss reg table: %d\n", rc); + + rc = msm_vidc_load_reg_table(res); + if (rc) { + d_vpr_e("Failed to load reg table: %d\n", rc); + goto err_load_reg_table; + } + + rc = msm_vidc_load_buffer_usage_table(res); + if (rc) { + d_vpr_e("Failed to load buffer usage table: %d\n", rc); + goto err_load_buffer_usage_table; + } + + rc = msm_vidc_load_regulator_table(res); + if (rc) { + d_vpr_e("Failed to load list of regulators %d\n", rc); + goto err_load_regulator_table; + } + + rc = msm_vidc_load_bus_table(res); + if (rc) { + d_vpr_e("Failed to load bus table: %d\n", rc); + goto err_load_bus_table; + } + + rc = msm_vidc_load_clock_table(res); + if (rc) { + d_vpr_e("Failed to load clock table: %d\n", rc); + goto err_load_clock_table; + } + + rc = msm_vidc_load_allowed_clocks_table(res); + if (rc) { + d_vpr_e("Failed to load allowed clocks table: %d\n", rc); + goto err_load_allowed_clocks_table; + } + + rc = msm_vidc_load_reset_table(res); + if (rc) { + d_vpr_e("Failed to load reset table: %d\n", rc); + goto err_load_reset_table; + } + + rc = msm_vidc_populate_legacy_context_bank(res); + if (rc) { + d_vpr_e("Failed to setup context banks %d\n", rc); + goto err_setup_legacy_cb; + } + + rc = msm_vidc_populate_cx_ipeak_context(res); + if (rc) { + d_vpr_e("Failed to setup cx-ipeak %d\n", rc); + goto err_register_cx_ipeak; + } + +return rc; + +err_register_cx_ipeak: + msm_vidc_free_cx_ipeak_context(res); +err_setup_legacy_cb: +err_load_reset_table: + msm_vidc_free_allowed_clocks_table(res); +err_load_allowed_clocks_table: + msm_vidc_free_clock_table(res); +err_load_clock_table: + msm_vidc_free_bus_table(res); +err_load_bus_table: + 
msm_vidc_free_regulator_table(res); +err_load_regulator_table: + msm_vidc_free_buffer_usage_table(res); +err_load_buffer_usage_table: + msm_vidc_free_reg_table(res); +err_load_reg_table: + return rc; +} + +static int msm_vidc_setup_context_bank(struct msm_vidc_platform_resources *res, + struct context_bank_info *cb, struct device *dev) +{ + int rc = 0; + struct bus_type *bus; + + if (!dev || !cb || !res) { + d_vpr_e("%s: Invalid Input params\n", __func__); + return -EINVAL; + } + cb->dev = dev; + + bus = cb->dev->bus; + if (IS_ERR_OR_NULL(bus)) { + d_vpr_e("%s: failed to get bus type\n", __func__); + rc = PTR_ERR(bus) ? PTR_ERR(bus) : -ENODEV; + goto remove_cb; + } + + cb->domain = iommu_get_domain_for_dev(cb->dev); + + /* + * When memory is fragmented, below configuration increases the + * possibility to get a mapping for buffer in the configured CB. + */ + iommu_dma_enable_best_fit_algo(cb->dev); + + /* + * configure device segment size and segment boundary to ensure + * iommu mapping returns one mapping (which is required for partial + * cache operations) + */ + if (!dev->dma_parms) + dev->dma_parms = + devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64)); + + d_vpr_h("Attached %s and created mapping\n", dev_name(dev)); + d_vpr_h( + "Context bank: %s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK, domain: %pK", + cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start, + cb->addr_range.size, cb->dev, cb->domain); + +remove_cb: + return rc; +} + +int msm_vidc_smmu_fault_handler(struct iommu_domain *domain, + struct device *dev, unsigned long iova, int flags, void *token) +{ + struct msm_vidc_core *core = token; + + if (!domain || !core) { + d_vpr_e("%s: invalid params %pK %pK\n", + __func__, domain, core); + return -EINVAL; + } + + if (core->smmu_fault_handled) { + if (core->resources.non_fatal_pagefaults) { + 
dprintk_ratelimit(VIDC_ERR, + "%s: non-fatal pagefault address: %lx\n", + __func__, iova); + return 0; + } + } + + d_vpr_e("%s: faulting address: %lx\n", __func__, iova); + + core->smmu_fault_handled = true; + msm_comm_print_insts_info(core); + /* + * Return -EINVAL to elicit the default behaviour of smmu driver. + * If we return -EINVAL, then smmu driver assumes page fault handler + * is not installed and prints a list of useful debug information like + * FAR, SID etc. This information is not printed if we return 0. + */ + return -EINVAL; +} + +static int msm_vidc_populate_context_bank(struct device *dev, + struct msm_vidc_core *core) +{ + int rc = 0; + struct context_bank_info *cb = NULL; + struct device_node *np = NULL; + + if (!dev || !core) { + d_vpr_e("%s: invalid inputs\n", __func__); + return -EINVAL; + } + + np = dev->of_node; + cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); + if (!cb) { + d_vpr_e("%s: Failed to allocate cb\n", __func__); + return -ENOMEM; + } + + INIT_LIST_HEAD(&cb->list); + + mutex_lock(&core->resources.cb_lock); + list_add_tail(&cb->list, &core->resources.context_banks); + mutex_unlock(&core->resources.cb_lock); + + rc = of_property_read_string(np, "label", &cb->name); + if (rc) { + d_vpr_h("Failed to read cb label from device tree\n"); + rc = 0; + } + + d_vpr_h("%s: context bank has name %s\n", __func__, cb->name); + rc = of_property_read_u32_array(np, "virtual-addr-pool", + (u32 *)&cb->addr_range, 2); + if (rc) { + d_vpr_e("Could not read addr pool: context bank: %s %d\n", + cb->name, rc); + goto err_setup_cb; + } + + cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank"); + d_vpr_h("context bank %s: secure = %d\n", + cb->name, cb->is_secure); + + /* setup buffer type for each sub device*/ + rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type); + if (rc) { + d_vpr_e("failed to load buffer_type info %d\n", rc); + rc = -ENOENT; + goto err_setup_cb; + } + d_vpr_h("context bank %s address start %x size %x 
buffer_type %x\n", + cb->name, cb->addr_range.start, + cb->addr_range.size, cb->buffer_type); + + rc = msm_vidc_setup_context_bank(&core->resources, cb, dev); + if (rc) { + d_vpr_e("Cannot setup context bank %d\n", rc); + goto err_setup_cb; + } + + iommu_set_fault_handler(cb->domain, + msm_vidc_smmu_fault_handler, (void *)core); + + return 0; + +err_setup_cb: + list_del(&cb->list); + return rc; +} + +static int msm_vidc_populate_legacy_context_bank( + struct msm_vidc_platform_resources *res) +{ + int rc = 0; + struct platform_device *pdev = NULL; + struct device_node *domains_parent_node = NULL; + struct device_node *domains_child_node = NULL; + struct device_node *ctx_node = NULL; + struct context_bank_info *cb; + + if (!res || !res->pdev) { + d_vpr_e("%s: invalid inputs\n", __func__); + return -EINVAL; + } + pdev = res->pdev; + + domains_parent_node = of_find_node_by_name(pdev->dev.of_node, + "qcom,vidc-iommu-domains"); + if (!domains_parent_node) { + d_vpr_h("%s: legacy iommu domains not present\n", __func__); + return 0; + } + + /* set up each context bank for legacy DT bindings*/ + for_each_child_of_node(domains_parent_node, + domains_child_node) { + cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL); + if (!cb) { + d_vpr_e("%s: Failed to allocate cb\n", __func__); + return -ENOMEM; + } + INIT_LIST_HEAD(&cb->list); + + mutex_lock(&res->cb_lock); + list_add_tail(&cb->list, &res->context_banks); + mutex_unlock(&res->cb_lock); + + ctx_node = of_parse_phandle(domains_child_node, + "qcom,vidc-domain-phandle", 0); + if (!ctx_node) { + d_vpr_e("%s: Unable to parse pHandle\n", __func__); + rc = -EBADHANDLE; + goto err_setup_cb; + } + + rc = of_property_read_string(ctx_node, "label", &(cb->name)); + if (rc) { + d_vpr_e("%s: Could not find label\n", __func__); + goto err_setup_cb; + } + + rc = of_property_read_u32_array(ctx_node, + "qcom,virtual-addr-pool", (u32 *)&cb->addr_range, 2); + if (rc) { + d_vpr_e("%s: Could not read addr pool: %s (%d)\n", + __func__, 
cb->name, rc); + goto err_setup_cb; + } + + cb->is_secure = + of_property_read_bool(ctx_node, "qcom,secure-domain"); + + rc = of_property_read_u32(domains_child_node, + "qcom,vidc-buffer-types", &cb->buffer_type); + if (rc) { + d_vpr_e("%s: Could not read buffer type (%d)\n", + __func__, rc); + goto err_setup_cb; + } + + cb->dev = msm_iommu_get_ctx(cb->name); + if (IS_ERR_OR_NULL(cb->dev)) { + d_vpr_e("%s: could not get device for cb %s\n", + __func__, cb->name); + rc = -ENOENT; + goto err_setup_cb; + } + + rc = msm_vidc_setup_context_bank(res, cb, cb->dev); + if (rc) { + d_vpr_e("Cannot setup context bank %d\n", rc); + goto err_setup_cb; + } + d_vpr_h( + "context bank %s secure %d addr start = %#x size = %#x buffer_type = %#x\n", + cb->name, cb->is_secure, cb->addr_range.start, + cb->addr_range.size, cb->buffer_type); + } + return rc; + +err_setup_cb: + list_del(&cb->list); + return rc; +} + +int read_context_bank_resources_from_dt(struct platform_device *pdev) +{ + struct msm_vidc_core *core; + int rc = 0; + + if (!pdev) { + d_vpr_e("Invalid platform device\n"); + return -EINVAL; + } else if (!pdev->dev.parent) { + d_vpr_e("Failed to find a parent for %s\n", + dev_name(&pdev->dev)); + return -ENODEV; + } + + core = dev_get_drvdata(pdev->dev.parent); + if (!core) { + d_vpr_e("Failed to find cookie in parent device %s", + dev_name(pdev->dev.parent)); + return -EINVAL; + } + + rc = msm_vidc_populate_context_bank(&pdev->dev, core); + if (rc) + d_vpr_e("Failed to probe context bank\n"); + else + d_vpr_h("Successfully probed context bank\n"); + + return rc; +} + +int read_mem_cdsp_resources_from_dt(struct platform_device *pdev) +{ + struct msm_vidc_core *core; + + if (!pdev) { + d_vpr_e("%s: invalid platform device\n", __func__); + return -EINVAL; + } else if (!pdev->dev.parent) { + d_vpr_e("Failed to find a parent for %s\n", + dev_name(&pdev->dev)); + return -ENODEV; + } + + core = dev_get_drvdata(pdev->dev.parent); + if (!core) { + d_vpr_e("Failed to find cookie in 
parent device %s", + dev_name(pdev->dev.parent)); + return -EINVAL; + } + + return msm_vidc_populate_mem_cdsp(&pdev->dev, &core->resources); +} diff --git a/techpack/video/msm/vidc/msm_vidc_res_parse.h b/techpack/video/msm/vidc/msm_vidc_res_parse.h new file mode 100644 index 000000000000..5254d2900c07 --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_res_parse.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef DT_PARSE +#define DT_PARSE +#include +#include "msm_vidc_resources.h" +#include "msm_vidc_common.h" +void msm_vidc_free_platform_resources( + struct msm_vidc_platform_resources *res); + +int read_hfi_type(struct platform_device *pdev); + +int read_platform_resources_from_drv_data( + struct msm_vidc_core *core); +int read_platform_resources_from_dt( + struct msm_vidc_platform_resources *res); + +int read_context_bank_resources_from_dt(struct platform_device *pdev); + +int read_bus_resources_from_dt(struct platform_device *pdev); +int read_mem_cdsp_resources_from_dt(struct platform_device *pdev); + +int msm_vidc_load_u32_table(struct platform_device *pdev, + struct device_node *of_node, char *table_name, int struct_size, + u32 **table, u32 *num_elements); + +#endif diff --git a/techpack/video/msm/vidc/msm_vidc_resources.h b/techpack/video/msm/vidc/msm_vidc_resources.h new file mode 100644 index 000000000000..ac6dd6eca9ec --- /dev/null +++ b/techpack/video/msm/vidc/msm_vidc_resources.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __MSM_VIDC_RESOURCES_H__ +#define __MSM_VIDC_RESOURCES_H__ + +#include +#include "msm_vidc.h" +#include +#include + +#define MAX_BUFFER_TYPES 32 + +struct reg_value_pair { + u32 reg; + u32 value; + u32 mask; +}; + +struct reg_set { + struct reg_value_pair *reg_tbl; + int count; +}; + +struct addr_range { + u32 start; + u32 size; +}; + +struct addr_set { + struct addr_range *addr_tbl; + int count; +}; + +struct context_bank_info { + struct list_head list; + const char *name; + u32 buffer_type; + bool is_secure; + struct addr_range addr_range; + struct device *dev; + struct iommu_domain *domain; +}; + +struct buffer_usage_table { + u32 buffer_type; + u32 tz_usage; +}; + +struct buffer_usage_set { + struct buffer_usage_table *buffer_usage_tbl; + u32 count; +}; + +struct regulator_info { + struct regulator *regulator; + bool has_hw_power_collapse; + char *name; +}; + +struct regulator_set { + struct regulator_info *regulator_tbl; + u32 count; +}; + +struct clock_info { + const char *name; + struct clk *clk; + u32 count; + bool has_scaling; + bool has_mem_retention; +}; + +struct clock_set { + struct clock_info *clock_tbl; + u32 count; +}; + +struct bus_info { + const char *name; + unsigned int range[2]; + struct device *dev; + struct icc_path *path; +}; + +struct bus_set { + struct bus_info *bus_tbl; + u32 count; +}; + +struct reset_info { + struct reset_control *rst; + const char *name; +}; + +struct reset_set { + struct reset_info *reset_tbl; + u32 count; +}; + +struct allowed_clock_rates_table { + u32 clock_rate; +}; + +struct memory_limit_table { + u32 ddr_size; /* mega bytes */ + u32 mem_limit; /* mega bytes */ +}; + +struct clock_profile_entry { + u32 codec_mask; + u32 vpp_cycles; + u32 vsp_cycles; + u32 low_power_cycles; +}; + +struct clock_freq_table { + struct clock_profile_entry *clk_prof_entries; + u32 count; +}; + +struct subcache_info { + const char *name; + bool isactive; + bool isset; + struct llcc_slice_desc *subcache; +}; + +struct 
subcache_set { + struct subcache_info *subcache_tbl; + u32 count; +}; + +struct msm_vidc_mem_cdsp { + struct device *dev; +}; + +struct msm_vidc_platform_resources { + phys_addr_t firmware_base; + phys_addr_t register_base; + uint32_t register_size; + uint32_t irq; + uint32_t sku_version; + struct allowed_clock_rates_table *allowed_clks_tbl; + u32 allowed_clks_tbl_size; + struct clock_freq_table clock_freq_tbl; + struct memory_limit_table *mem_limit_tbl; + u32 memory_limit_table_size; + bool sys_cache_present; + bool sys_cache_res_set; + struct subcache_set subcache_set; + struct reg_set reg_set; + struct addr_set qdss_addr_set; + struct buffer_usage_set buffer_usage_set; + uint32_t max_load; + uint32_t max_image_load; + uint32_t max_mbpf; + uint32_t max_hq_mbs_per_frame; + uint32_t max_hq_mbs_per_sec; + uint32_t max_bframe_mbs_per_frame; + uint32_t max_bframe_mbs_per_sec; + struct platform_device *pdev; + struct regulator_set regulator_set; + struct clock_set clock_set; + struct bus_set bus_set; + struct reset_set reset_set; + bool sw_power_collapsible; + bool slave_side_cp; + struct list_head context_banks; + struct mutex cb_lock; + bool thermal_mitigable; + const char *fw_name; + const char *hfi_version; + bool never_unload_fw; + bool debug_timeout; + uint32_t max_inst_count; + uint32_t max_secure_inst_count; + uint32_t prefetch_pix_buf_count; + uint32_t prefetch_pix_buf_size; + uint32_t prefetch_non_pix_buf_count; + uint32_t prefetch_non_pix_buf_size; + int msm_vidc_hw_rsp_timeout; + int msm_vidc_firmware_unload_delay; + uint32_t msm_vidc_pwr_collapse_delay; + bool non_fatal_pagefaults; + bool cache_pagetables; + bool decode_batching; + uint32_t batch_timeout; + bool dcvs; + struct msm_vidc_codec_data *codec_data; + int codec_data_count; + struct msm_vidc_codec *codecs; + uint32_t codecs_count; + struct msm_vidc_codec_capability *codec_caps; + uint32_t codec_caps_count; + struct msm_vidc_vpss_capability *vpss_caps; + uint32_t vpss_caps_count; + struct 
msm_vidc_csc_coeff *csc_coeff_data; + struct msm_vidc_mem_cdsp mem_cdsp; + uint32_t vpu_ver; + uint32_t fw_cycles; + uint32_t fw_vpp_cycles; + uint32_t avsync_window_size; + struct msm_vidc_ubwc_config_data *ubwc_config; + uint32_t clk_freq_threshold; + struct cx_ipeak_client *cx_ipeak_context; + uint32_t ubwc_stats_in_fbd; + uint32_t has_vpp_delay; + bool enc_auto_dynamic_fps; + bool no_cvp; +}; + +static inline bool is_iommu_present(struct msm_vidc_platform_resources *res) +{ + return !list_empty(&res->context_banks); +} + +#endif + diff --git a/techpack/video/msm/vidc/vidc_hfi.c b/techpack/video/msm/vidc/vidc_hfi.c new file mode 100644 index 000000000000..959ac5e1fe53 --- /dev/null +++ b/techpack/video/msm/vidc/vidc_hfi.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ +#include "msm_vidc_debug.h" +#include "vidc_hfi_api.h" +#include "hfi_common.h" + +struct hfi_device *vidc_hfi_initialize(enum msm_vidc_hfi_type hfi_type, + u32 device_id, struct msm_vidc_platform_resources *res, + hfi_cmd_response_callback callback) +{ + struct hfi_device *hdev = NULL; + int rc = 0; + + hdev = kzalloc(sizeof(struct hfi_device), GFP_KERNEL); + if (!hdev) { + d_vpr_e("%s: failed to allocate hdev\n", __func__); + return NULL; + } + + switch (hfi_type) { + case VIDC_HFI_VENUS: + rc = venus_hfi_initialize(hdev, device_id, res, callback); + break; + default: + d_vpr_e("Unsupported host-firmware interface\n"); + goto err_hfi_init; + } + + if (rc) { + if (rc != -EPROBE_DEFER) + d_vpr_e("%s: device init failed rc = %d", + __func__, rc); + goto err_hfi_init; + } + + return hdev; + +err_hfi_init: + kfree(hdev); + return ERR_PTR(rc); +} + +void vidc_hfi_deinitialize(enum msm_vidc_hfi_type hfi_type, + struct hfi_device *hdev) +{ + if (!hdev) { + d_vpr_e("%s: invalid device %pK", __func__, hdev); + return; + } + + switch (hfi_type) { + case VIDC_HFI_VENUS: + 
venus_hfi_delete_device(hdev->hfi_device_data); + break; + default: + d_vpr_e("Unsupported host-firmware interface\n"); + } + + kfree(hdev); +} + diff --git a/techpack/video/msm/vidc/vidc_hfi.h b/techpack/video/msm/vidc/vidc_hfi.h new file mode 100644 index 000000000000..3ae3d0ffb2ab --- /dev/null +++ b/techpack/video/msm/vidc/vidc_hfi.h @@ -0,0 +1,839 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ +#ifndef __H_VIDC_HFI_H__ +#define __H_VIDC_HFI_H__ + +#include "vidc/media/msm_media_info.h" +#include "vidc_hfi_helper.h" +#include "vidc_hfi_api.h" + +#define HFI_EVENT_SESSION_SEQUENCE_CHANGED (HFI_OX_BASE + 0x3) +#define HFI_EVENT_SESSION_PROPERTY_CHANGED (HFI_OX_BASE + 0x4) +#define HFI_EVENT_SESSION_LTRUSE_FAILED (HFI_OX_BASE + 0x5) +#define HFI_EVENT_RELEASE_BUFFER_REFERENCE (HFI_OX_BASE + 0x6) + +#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES \ + (HFI_OX_BASE + 0x1) +#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES \ + (HFI_OX_BASE + 0x2) + +#define HFI_BUFFERFLAG_EOS 0x00000001 +#define HFI_BUFFERFLAG_STARTTIME 0x00000002 +#define HFI_BUFFERFLAG_DECODEONLY 0x00000004 +#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008 +#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010 +#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020 +#define HFI_BUFFERFLAG_EXTRADATA 0x00000040 +#define HFI_BUFFERFLAG_CODECCONFIG 0x00000080 +#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100 +#define HFI_BUFFERFLAG_READONLY 0x00000200 +#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400 +#define HFI_BUFFERFLAG_EOSEQ 0x00200000 +#define HFI_BUFFER_FLAG_MBAFF 0x08000000 +#define HFI_BUFFERFLAG_VPE_YUV_601_709_CSC_CLAMP \ + 0x10000000 +#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000 +#define HFI_BUFFERFLAG_TEI 0x40000000 +#define HFI_BUFFERFLAG_DISCONTINUITY 0x80000000 +#define HFI_BUFFERFLAG_CVPMETADATA_REPEAT 0x00000800 + + +#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING \ + 
(HFI_OX_BASE + 0x1001) +#define HFI_ERR_SESSION_SAME_STATE_OPERATION \ + (HFI_OX_BASE + 0x1002) +#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED \ + (HFI_OX_BASE + 0x1003) +#define HFI_ERR_SESSION_START_CODE_NOT_FOUND \ + (HFI_OX_BASE + 0x1004) + + +#define HFI_BUFFER_MODE_DYNAMIC (HFI_OX_BASE + 0x3) + +#define HFI_FLUSH_INPUT (HFI_OX_BASE + 0x1) +#define HFI_FLUSH_OUTPUT (HFI_OX_BASE + 0x2) +#define HFI_FLUSH_ALL (HFI_OX_BASE + 0x4) + +#define HFI_EXTRADATA_NONE 0x00000000 +#define HFI_EXTRADATA_MB_QUANTIZATION 0x00000001 +#define HFI_EXTRADATA_INTERLACE_VIDEO 0x00000002 +#define HFI_EXTRADATA_TIMESTAMP 0x00000005 +#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006 +#define HFI_EXTRADATA_FRAME_RATE 0x00000007 +#define HFI_EXTRADATA_PANSCAN_WINDOW 0x00000008 +#define HFI_EXTRADATA_RECOVERY_POINT_SEI 0x00000009 +#define HFI_EXTRADATA_MPEG2_SEQDISP 0x0000000D +#define HFI_EXTRADATA_STREAM_USERDATA 0x0000000E +#define HFI_EXTRADATA_FRAME_QP 0x0000000F +#define HFI_EXTRADATA_FRAME_BITS_INFO 0x00000010 +#define HFI_EXTRADATA_VPX_COLORSPACE 0x00000014 +#define HFI_EXTRADATA_UBWC_CR_STAT_INFO 0x00000019 +#define HFI_EXTRADATA_MULTISLICE_INFO 0x7F100000 +#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7F100001 +#define HFI_EXTRADATA_INDEX 0x7F100002 +#define HFI_EXTRADATA_METADATA_LTR 0x7F100004 +#define HFI_EXTRADATA_METADATA_FILLER 0x7FE00002 + +#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000E +#define HFI_INDEX_EXTRADATA_OUTPUT_CROP 0x0700000F +#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7F100003 + +struct hfi_buffer_alloc_mode { + u32 buffer_type; + u32 buffer_mode; +}; + + +struct hfi_index_extradata_config { + int enable; + u32 index_extra_data_id; +}; + +struct hfi_extradata_header { + u32 size; + u32 version; + u32 port_index; + u32 type; + u32 data_size; + u8 rg_data[1]; +}; + +#define HFI_INTERLACE_FRAME_PROGRESSIVE 0x01 +#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST 0x02 +#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04 +#define 
HFI_INTERLACE_FRAME_TOPFIELDFIRST 0x08 +#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10 +#define HFI_INTERLACE_FRAME_MBAFF 0x20 + +#define HFI_PROPERTY_SYS_OX_START \ + (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000) + +#define HFI_PROPERTY_PARAM_OX_START \ + (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000) +#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL \ + (HFI_PROPERTY_PARAM_OX_START + 0x001) +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO \ + (HFI_PROPERTY_PARAM_OX_START + 0x002) +#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA \ + (HFI_PROPERTY_PARAM_OX_START + 0x006) +#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA \ + (HFI_PROPERTY_PARAM_OX_START + 0x009) +#define HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM \ + (HFI_PROPERTY_PARAM_OX_START + 0x00C) +#define HFI_PROPERTY_PARAM_SYNC_BASED_INTERRUPT \ + (HFI_PROPERTY_PARAM_OX_START + 0x00E) + +#define HFI_PROPERTY_CONFIG_OX_START \ + (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x02000) +#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS \ + (HFI_PROPERTY_CONFIG_OX_START + 0x001) +#define HFI_PROPERTY_CONFIG_REALTIME \ + (HFI_PROPERTY_CONFIG_OX_START + 0x002) +#define HFI_PROPERTY_CONFIG_PRIORITY \ + (HFI_PROPERTY_CONFIG_OX_START + 0x003) +#define HFI_PROPERTY_PARAM_VDEC_OX_START \ + (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000) +#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001) +#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x003) +#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x004) +#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x005) +#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x006) +#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x007) +#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO\ + 
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x009) +#define HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00A) +#define HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00B) +#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00C) +#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00D) +#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00E) +#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x013) +#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x014) +#define HFI_PROPERTY_PARAM_VDEC_AVC_SESSION_SELECT \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x015) +#define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x016) +#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x017) +#define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x018) +#define HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x019) +#define HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x01B) +#define HFI_PROPERTY_PARAM_VDEC_VQZIP_SEI_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001C) +#define HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001D) +#define HFI_PROPERTY_PARAM_VDEC_MASTER_DISP_COL_SEI_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001E) +#define HFI_PROPERTY_PARAM_VDEC_CLL_SEI_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001F) +#define HFI_PROPERTY_PARAM_VDEC_COLOUR_REMAPPING_INFO_SEI_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0020) +#define HFI_PROPERTY_PARAM_VDEC_DOWN_SCALAR \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0021) +#define 
HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0022) +#define HFI_PROPERTY_PARAM_VDEC_HDR10_HIST_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0023) +#define HFI_PROPERTY_PARAM_VDEC_VSP_VPP_DELAY \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0024) +#define HFI_PROPERTY_PARAM_VDEC_SEQCHNG_AT_SYNCFRM \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0025) + +#define HFI_PROPERTY_CONFIG_VDEC_OX_START \ + (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000) +#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING \ + (HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002) +#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP \ + (HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x003) +#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY \ + (HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x004) + +#define HFI_PROPERTY_PARAM_VENC_OX_START \ + (HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000) +#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x001) +#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x002) +#define HFI_PROPERTY_PARAM_VENC_LTR_INFO \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x003) +#define HFI_PROPERTY_PARAM_VENC_MBI_DUMPING \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x005) +#define HFI_PROPERTY_PARAM_VENC_FRAME_QP_EXTRADATA \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x006) +#define HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x008) +#define HFI_PROPERTY_PARAM_VENC_HDR10PLUS_METADATA_EXTRADATA \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x00A) +#define HFI_PROPERTY_PARAM_VENC_CVP_METADATA_EXTRADATA \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x00B) + +#define HFI_PROPERTY_CONFIG_VENC_OX_START \ + (HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000) +#define HFI_PROPERTY_PARAM_VPE_OX_START \ + (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000) + +#define HFI_PROPERTY_CONFIG_VPE_OX_START \ + (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000) + +struct 
hfi_batch_info { + u32 input_batch_count; + u32 output_batch_count; +}; + +struct hfi_buffer_count_actual { + u32 buffer_type; + u32 buffer_count_actual; + u32 buffer_count_min_host; +}; + +struct hfi_buffer_size_minimum { + u32 buffer_type; + u32 buffer_size; +}; + +struct hfi_buffer_requirements { + u32 buffer_type; + u32 buffer_size; + u32 buffer_region_size; + u32 buffer_count_min; + u32 buffer_count_min_host; + u32 buffer_count_actual; + u32 contiguous; + u32 buffer_alignment; +}; + +struct hfi_data_payload { + u32 size; + u8 rg_data[1]; +}; + +struct hfi_mb_error_map { + u32 error_map_size; + u8 rg_error_map[1]; +}; + +struct hfi_metadata_pass_through { + int enable; + u32 size; +}; + +struct hfi_multi_view_select { + u32 view_index; +}; + +struct hfi_hybrid_hierp { + u32 layers; +}; + +struct hfi_ssr_payload { + u32 sub_client_id; + u32 test_addr; +}; + +#define HFI_PRIORITY_LOW 10 +#define HFI_PRIOIRTY_MEDIUM 20 +#define HFI_PRIORITY_HIGH 30 + +#define HFI_OUTPUT_ORDER_DISPLAY (HFI_OX_BASE + 0x1) +#define HFI_OUTPUT_ORDER_DECODE (HFI_OX_BASE + 0x2) + +#define HFI_RATE_CONTROL_OFF (HFI_OX_BASE + 0x1) +#define HFI_RATE_CONTROL_VBR_VFR (HFI_OX_BASE + 0x2) +#define HFI_RATE_CONTROL_VBR_CFR (HFI_OX_BASE + 0x3) +#define HFI_RATE_CONTROL_CBR_VFR (HFI_OX_BASE + 0x4) +#define HFI_RATE_CONTROL_CBR_CFR (HFI_OX_BASE + 0x5) +#define HFI_RATE_CONTROL_MBR_CFR (HFI_OX_BASE + 0x6) +#define HFI_RATE_CONTROL_MBR_VFR (HFI_OX_BASE + 0x7) +#define HFI_RATE_CONTROL_CQ (HFI_OX_BASE + 0x8) + + +struct hfi_uncompressed_plane_actual_constraints_info { + u32 buffer_type; + u32 num_planes; + struct hfi_uncompressed_plane_constraints rg_plane_format[1]; +}; + +#define HFI_CMD_SYS_OX_START \ +(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x0000) +#define HFI_CMD_SYS_SESSION_ABORT (HFI_CMD_SYS_OX_START + 0x001) +#define HFI_CMD_SYS_PING (HFI_CMD_SYS_OX_START + 0x002) + + +#define HFI_CMD_SESSION_OX_START \ +(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 
HFI_CMD_START_OFFSET + 0x1000) +#define HFI_CMD_SESSION_LOAD_RESOURCES (HFI_CMD_SESSION_OX_START + 0x001) +#define HFI_CMD_SESSION_START (HFI_CMD_SESSION_OX_START + 0x002) +#define HFI_CMD_SESSION_STOP (HFI_CMD_SESSION_OX_START + 0x003) +#define HFI_CMD_SESSION_EMPTY_BUFFER (HFI_CMD_SESSION_OX_START + 0x004) +#define HFI_CMD_SESSION_FILL_BUFFER (HFI_CMD_SESSION_OX_START + 0x005) +#define HFI_CMD_SESSION_SUSPEND (HFI_CMD_SESSION_OX_START + 0x006) +#define HFI_CMD_SESSION_RESUME (HFI_CMD_SESSION_OX_START + 0x007) +#define HFI_CMD_SESSION_FLUSH (HFI_CMD_SESSION_OX_START + 0x008) +#define HFI_CMD_SESSION_GET_PROPERTY (HFI_CMD_SESSION_OX_START + 0x009) +#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER \ + (HFI_CMD_SESSION_OX_START + 0x00A) +#define HFI_CMD_SESSION_RELEASE_BUFFERS \ + (HFI_CMD_SESSION_OX_START + 0x00B) +#define HFI_CMD_SESSION_RELEASE_RESOURCES \ + (HFI_CMD_SESSION_OX_START + 0x00C) +#define HFI_CMD_SESSION_CONTINUE (HFI_CMD_SESSION_OX_START + 0x00D) +#define HFI_CMD_SESSION_SYNC (HFI_CMD_SESSION_OX_START + 0x00E) + +#define HFI_MSG_SYS_OX_START \ +(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000) +#define HFI_MSG_SYS_PING_ACK (HFI_MSG_SYS_OX_START + 0x2) +#define HFI_MSG_SYS_SESSION_ABORT_DONE (HFI_MSG_SYS_OX_START + 0x4) + +#define HFI_MSG_SESSION_OX_START \ +(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x1000) +#define HFI_MSG_SESSION_LOAD_RESOURCES_DONE (HFI_MSG_SESSION_OX_START + 0x1) +#define HFI_MSG_SESSION_START_DONE (HFI_MSG_SESSION_OX_START + 0x2) +#define HFI_MSG_SESSION_STOP_DONE (HFI_MSG_SESSION_OX_START + 0x3) +#define HFI_MSG_SESSION_SUSPEND_DONE (HFI_MSG_SESSION_OX_START + 0x4) +#define HFI_MSG_SESSION_RESUME_DONE (HFI_MSG_SESSION_OX_START + 0x5) +#define HFI_MSG_SESSION_FLUSH_DONE (HFI_MSG_SESSION_OX_START + 0x6) +#define HFI_MSG_SESSION_EMPTY_BUFFER_DONE (HFI_MSG_SESSION_OX_START + 0x7) +#define HFI_MSG_SESSION_FILL_BUFFER_DONE (HFI_MSG_SESSION_OX_START + 0x8) +#define 
HFI_MSG_SESSION_PROPERTY_INFO (HFI_MSG_SESSION_OX_START + 0x9) +#define HFI_MSG_SESSION_RELEASE_RESOURCES_DONE \ + (HFI_MSG_SESSION_OX_START + 0xA) +#define HFI_MSG_SESSION_RELEASE_BUFFERS_DONE \ + (HFI_MSG_SESSION_OX_START + 0xC) + +#define VIDC_IFACEQ_MAX_PKT_SIZE 1024 +#define VIDC_IFACEQ_MED_PKT_SIZE 768 +#define VIDC_IFACEQ_MIN_PKT_SIZE 8 +#define VIDC_IFACEQ_VAR_SMALL_PKT_SIZE 100 +#define VIDC_IFACEQ_VAR_LARGE_PKT_SIZE 512 +#define VIDC_IFACEQ_VAR_HUGE_PKT_SIZE (1024*12) + + +struct hfi_cmd_sys_session_abort_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_cmd_sys_ping_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_cmd_session_load_resources_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_cmd_session_start_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_cmd_session_stop_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_cmd_session_empty_buffer_compressed_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; /* not used anywhere */ + u32 mark_data; /* not used anywhere */ + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 input_tag; + u32 packet_buffer; + u32 extra_data_buffer; + u32 rgData[1]; +}; + +struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 view_id; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; /* not used anywhere */ + u32 mark_data; /* not used anywhere */ + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 input_tag; + u32 packet_buffer; + u32 extra_data_buffer; + u32 rgData[1]; +}; + +struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer2; + u32 rgData[1]; +}; + +struct hfi_cmd_session_empty_buffer_uncompressed_plane2_packet { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; 
+ u32 packet_buffer3; + u32 rgData[1]; +}; + +struct hfi_cmd_session_fill_buffer_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 stream_id; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 output_tag; + u32 packet_buffer; + u32 extra_data_buffer; + u32 rgData[1]; +}; + +struct hfi_cmd_session_flush_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 flush_type; +}; + +struct hfi_cmd_session_suspend_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_cmd_session_resume_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_cmd_session_get_property_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 num_properties; + u32 rg_property_data[1]; +}; + +struct hfi_cmd_session_release_buffer_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 buffer_type; + u32 buffer_size; + u32 extra_data_size; + int response_req; + u32 num_buffers; + u32 rg_buffer_info[1]; +}; + +struct hfi_cmd_session_release_resources_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_msg_sys_session_abort_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; +}; + +struct hfi_msg_sys_ping_ack_pkt { + u32 size; + u32 packet_type; + u32 sid; +}; + +struct hfi_msg_sys_property_info_packet { + u32 size; + u32 packet_type; + u32 num_properties; + u32 rg_property_data[1]; +}; + +struct hfi_msg_session_load_resources_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; +}; + +struct hfi_msg_session_start_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; +}; + +struct hfi_msg_session_stop_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; +}; + +struct hfi_msg_session_suspend_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; +}; + +struct hfi_msg_session_resume_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; +}; + +struct hfi_msg_session_flush_done_packet { + u32 size; + u32 packet_type; + u32 
sid; + u32 error_type; + u32 flush_type; +}; + +struct hfi_ubwc_cr_stats { + u32 is_valid; + u32 worst_compression_ratio; + u32 worst_complexity_number; +}; + +struct hfi_ubwc_cr_stats_info_type { + u32 cr_stats_info0; + u32 cr_stats_info1; + u32 cr_stats_info2; + u32 cr_stats_info3; + u32 cr_stats_info4; + u32 cr_stats_info5; + u32 cr_stats_info6; +}; + +struct hfi_frame_cr_stats_type { + u32 frame_index; + struct hfi_ubwc_cr_stats_info_type ubwc_stats_info; + u32 complexity_number; +}; + +struct hfi_msg_session_empty_buffer_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; + u32 offset; + u32 filled_len; + u32 input_tag; + u32 packet_buffer; + u32 extra_data_buffer; + u32 flags; + struct hfi_frame_cr_stats_type ubwc_cr_stats; + /* no usage of sync_frame flag in EBD, rgData[1] is not used */ + u32 rgData[1]; +}; + +struct hfi_msg_session_fill_buffer_done_compressed_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 error_type; + u32 flags; + u32 mark_target; /* not used anywhere */ + u32 mark_data; /* not used anywhere */ + u32 stats; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 input_tag; + u32 output_tag; + u32 picture_type; + u32 packet_buffer; + u32 extra_data_buffer; + u32 rgData[0]; +}; + +struct hfi_msg_session_fbd_uncompressed_plane0_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 stream_id; + u32 view_id; + u32 error_type; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; /* not used anywhere */ + u32 mark_data; /* not used anywhere */ + u32 stats; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 frame_width; + u32 frame_height; + u32 start_x_coord; + u32 start_y_coord; + u32 input_tag; + u32 input_tag2; + u32 output_tag; + u32 picture_type; + u32 packet_buffer; + u32 extra_data_buffer; + u32 rgData[0]; +}; + +struct hfi_msg_session_fill_buffer_done_uncompressed_plane1_packet { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 
offset; + u32 packet_buffer2; + u32 rgData[0]; +}; + +struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer3; + u32 rgData[0]; +}; + +struct hfi_msg_session_property_info_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 num_properties; + u32 rg_property_data[1]; +}; + +struct hfi_msg_session_release_resources_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; +}; + +struct hfi_msg_session_release_buffers_done_packet { + u32 size; + u32 packet_type; + u32 sid; + u32 error_type; + u32 num_buffers; + u32 rg_buffer_info[1]; +}; + +struct hfi_extradata_mb_quantization_payload { + u8 rg_mb_qp[1]; +}; + +struct hfi_extradata_timestamp_payload { + u32 time_stamp_low; + u32 time_stamp_high; +}; + + +struct hfi_extradata_s3d_frame_packing_payload { + u32 fpa_id; + int cancel_flag; + u32 fpa_type; + int quin_cunx_flag; + u32 content_interprtation_type; + int spatial_flipping_flag; + int frame0_flipped_flag; + int field_views_flag; + int current_frame_isFrame0_flag; + int frame0_self_contained_flag; + int frame1_self_contained_flag; + u32 frame0_graid_pos_x; + u32 frame0_graid_pos_y; + u32 frame1_graid_pos_x; + u32 frame1_graid_pos_y; + u32 fpa_reserved_byte; + u32 fpa_repetition_period; + int fpa_extension_flag; +}; + +struct hfi_extradata_interlace_video_payload { + u32 format; +}; + +struct hfi_conceal_color_type { + u32 value_8bit; + u32 value_10bit; +}; + +struct hfi_extradata_num_concealed_mb_payload { + u32 num_mb_concealed; +}; + +struct hfi_extradata_sliceinfo { + u32 offset_in_stream; + u32 slice_length; +}; + +struct hfi_extradata_multislice_info_payload { + u32 num_slices; + struct hfi_extradata_sliceinfo rg_slice_info[1]; +}; + +struct hfi_index_extradata_input_crop_payload { + u32 size; + u32 version; + u32 port_index; + u32 left; + u32 top; + u32 width; + u32 height; +}; + +struct hfi_index_extradata_output_crop_payload { + u32 size; + 
u32 version; + u32 port_index; + u32 left; + u32 top; + u32 display_width; + u32 display_height; + u32 width; + u32 height; +}; + +struct hfi_index_extradata_digital_zoom_payload { + u32 size; + u32 version; + u32 port_index; + int width; + int height; +}; + +struct hfi_index_extradata_aspect_ratio_payload { + u32 size; + u32 version; + u32 port_index; + u32 aspect_width; + u32 aspect_height; +}; + +struct hfi_extradata_frame_type_payload { + u32 frame_rate; +}; + +struct hfi_extradata_recovery_point_sei_payload { + u32 flag; +}; + +struct hfi_cmd_session_continue_packet { + u32 size; + u32 packet_type; + u32 sid; +}; + +enum session_flags { + SESSION_PAUSE = BIT(1), +}; + +struct hal_session { + struct list_head list; + void *inst_id; + bool is_decoder; + enum hal_video_codec codec; + enum hal_domain domain; + u32 flags; + u32 sid; +}; + +struct hal_device_data { + struct list_head dev_head; + int dev_count; +}; + +struct msm_vidc_fw { + void *cookie; +}; + +int hfi_process_msg_packet(u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr, + struct msm_vidc_cb_info *info); + +#endif + diff --git a/techpack/video/msm/vidc/vidc_hfi_api.h b/techpack/video/msm/vidc/vidc_hfi_api.h new file mode 100644 index 000000000000..230c217d3db6 --- /dev/null +++ b/techpack/video/msm/vidc/vidc_hfi_api.h @@ -0,0 +1,692 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __VIDC_HFI_API_H__ +#define __VIDC_HFI_API_H__ + +#include "msm_vidc.h" +#include "msm_vidc_resources.h" + +#define CONTAINS(__a, __sz, __t) (\ + (__t >= __a) && \ + (__t < __a + __sz) \ +) + +#define OVERLAPS(__t, __tsz, __a, __asz) (\ + (__t <= __a) && \ + (__t + __tsz >= __a + __asz) \ +) + +#define HAL_BUFFERFLAG_EOS 0x00000001 +#define HAL_BUFFERFLAG_STARTTIME 0x00000002 +#define HAL_BUFFERFLAG_DATACORRUPT 0x00000008 +#define HAL_BUFFERFLAG_ENDOFFRAME 0x00000010 +#define HAL_BUFFERFLAG_SYNCFRAME 0x00000020 +#define HAL_BUFFERFLAG_EXTRADATA 0x00000040 +#define HAL_BUFFERFLAG_CODECCONFIG 0x00000080 +#define HAL_BUFFERFLAG_READONLY 0x00000200 +#define HAL_BUFFERFLAG_ENDOFSUBFRAME 0x00000400 +#define HAL_BUFFERFLAG_MBAFF 0x08000000 +#define HAL_BUFFERFLAG_YUV_601_709_CSC_CLAMP 0x10000000 +#define HAL_BUFFERFLAG_DROP_FRAME 0x20000000 +#define HAL_BUFFERFLAG_TS_DISCONTINUITY 0x40000000 +#define HAL_BUFFERFLAG_TS_ERROR 0x80000000 +#define HAL_BUFFERFLAG_CVPMETADATA_SKIP 0x00000800 + + +#define HAL_DEBUG_MSG_LOW 0x00000001 +#define HAL_DEBUG_MSG_MEDIUM 0x00000002 +#define HAL_DEBUG_MSG_HIGH 0x00000004 +#define HAL_DEBUG_MSG_ERROR 0x00000008 +#define HAL_DEBUG_MSG_FATAL 0x00000010 +#define MAX_PROFILE_COUNT 16 + +#define HAL_MAX_MATRIX_COEFFS 9 +#define HAL_MAX_BIAS_COEFFS 3 +#define HAL_MAX_LIMIT_COEFFS 6 +#define VENUS_VERSION_LENGTH 128 + +#define IDR_PERIOD 1 + +/* 16 video sessions */ +#define VIDC_MAX_SESSIONS 16 + +enum vidc_status { + VIDC_ERR_NONE = 0x0, + VIDC_ERR_FAIL = 0x80000000, + VIDC_ERR_ALLOC_FAIL, + VIDC_ERR_ILLEGAL_OP, + VIDC_ERR_BAD_PARAM, + VIDC_ERR_BAD_HANDLE, + VIDC_ERR_NOT_SUPPORTED, + VIDC_ERR_BAD_STATE, + VIDC_ERR_MAX_CLIENTS, + VIDC_ERR_IFRAME_EXPECTED, + VIDC_ERR_HW_FATAL, + VIDC_ERR_BITSTREAM_ERR, + VIDC_ERR_INDEX_NOMORE, + VIDC_ERR_SEQHDR_PARSE_FAIL, + VIDC_ERR_INSUFFICIENT_BUFFER, + VIDC_ERR_BAD_POWER_STATE, + VIDC_ERR_NO_VALID_SESSION, + VIDC_ERR_TIMEOUT, + VIDC_ERR_CMDQFULL, + VIDC_ERR_START_CODE_NOT_FOUND, + 
VIDC_ERR_NOC_ERROR, + VIDC_ERR_CLIENT_PRESENT = 0x90000001, + VIDC_ERR_CLIENT_FATAL, + VIDC_ERR_CMD_QUEUE_FULL, + VIDC_ERR_UNUSED = 0x10000000 +}; + +enum hal_domain { + HAL_VIDEO_DOMAIN_VPE = BIT(0), + HAL_VIDEO_DOMAIN_ENCODER = BIT(1), + HAL_VIDEO_DOMAIN_DECODER = BIT(2), + HAL_UNUSED_DOMAIN = 0x10000000, +}; + +enum multi_stream { + HAL_VIDEO_DECODER_NONE = 0x00000000, + HAL_VIDEO_DECODER_PRIMARY = 0x00000001, + HAL_VIDEO_DECODER_SECONDARY = 0x00000002, + HAL_VIDEO_DECODER_BOTH_OUTPUTS = 0x00000004, + HAL_VIDEO_UNUSED_OUTPUTS = 0x10000000, +}; + +enum hal_core_capabilities { + HAL_VIDEO_ENCODER_ROTATION_CAPABILITY = 0x00000001, + HAL_VIDEO_ENCODER_SCALING_CAPABILITY = 0x00000002, + HAL_VIDEO_ENCODER_DEINTERLACE_CAPABILITY = 0x00000004, + HAL_VIDEO_DECODER_MULTI_STREAM_CAPABILITY = 0x00000008, + HAL_VIDEO_UNUSED_CAPABILITY = 0x10000000, +}; + +enum hal_default_properties { + HAL_VIDEO_DYNAMIC_BUF_MODE = 0x00000001, + HAL_VIDEO_CONTINUE_DATA_TRANSFER = 0x00000002, +}; + +enum hal_video_codec { + HAL_VIDEO_CODEC_UNKNOWN = 0x00000000, + HAL_VIDEO_CODEC_MVC = 0x00000001, + HAL_VIDEO_CODEC_H264 = 0x00000002, + HAL_VIDEO_CODEC_H263 = 0x00000004, + HAL_VIDEO_CODEC_MPEG1 = 0x00000008, + HAL_VIDEO_CODEC_MPEG2 = 0x00000010, + HAL_VIDEO_CODEC_MPEG4 = 0x00000020, + HAL_VIDEO_CODEC_DIVX_311 = 0x00000040, + HAL_VIDEO_CODEC_DIVX = 0x00000080, + HAL_VIDEO_CODEC_VC1 = 0x00000100, + HAL_VIDEO_CODEC_SPARK = 0x00000200, + HAL_VIDEO_CODEC_VP6 = 0x00000400, + HAL_VIDEO_CODEC_VP7 = 0x00000800, + HAL_VIDEO_CODEC_HEVC = 0x00002000, + HAL_VIDEO_CODEC_VP9 = 0x00004000, + HAL_VIDEO_CODEC_HEVC_HYBRID = 0x80000000, + HAL_UNUSED_CODEC = 0x10000000, +}; + +enum hal_ssr_trigger_type { + SSR_ERR_FATAL = 1, + SSR_SW_DIV_BY_ZERO, + SSR_HW_WDOG_IRQ, + SSR_NOC_ERROR, + SSR_VCODEC_HUNG, +}; + +struct hal_profile_level { + u32 profile; + u32 level; +}; + +struct hal_profile_level_supported { + u32 profile_count; + struct hal_profile_level profile_level[MAX_PROFILE_COUNT]; +}; + +enum 
hal_intra_refresh_mode { + HAL_INTRA_REFRESH_NONE = 0x1, + HAL_INTRA_REFRESH_CYCLIC = 0x2, + HAL_INTRA_REFRESH_RANDOM = 0x5, + HAL_UNUSED_INTRA = 0x10000000, +}; + +struct hal_intra_refresh { + enum hal_intra_refresh_mode mode; + u32 ir_mbs; +}; + +struct hal_buffer_requirements { + enum hal_buffer buffer_type; + u32 buffer_size; + u16 buffer_count_min; + u16 buffer_count_min_host; + u16 buffer_count_actual; + u16 buffer_alignment; +}; + +enum hal_priority {/* Priority increases with number */ + HAL_PRIORITY_LOW = 10, + HAL_PRIOIRTY_MEDIUM = 20, + HAL_PRIORITY_HIGH = 30, + HAL_UNUSED_PRIORITY = 0x10000000, +}; + +struct hal_batch_info { + u32 input_batch_count; + u32 output_batch_count; +}; + +struct hal_uncompressed_format_supported { + enum hal_buffer buffer_type; + u32 format_entries; + u32 rg_format_info[1]; +}; + +enum hal_interlace_format { + HAL_INTERLACE_FRAME_PROGRESSIVE = 0x01, + HAL_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST = 0x02, + HAL_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04, + HAL_INTERLACE_FRAME_TOPFIELDFIRST = 0x08, + HAL_INTERLACE_FRAME_BOTTOMFIELDFIRST = 0x10, + HAL_UNUSED_INTERLACE = 0x10000000, +}; + +struct hal_interlace_format_supported { + enum hal_buffer buffer_type; + enum hal_interlace_format format; +}; + +enum hal_chroma_site { + HAL_CHROMA_SITE_0, + HAL_CHROMA_SITE_1, + HAL_UNUSED_CHROMA = 0x10000000, +}; + +enum hal_capability { + CAP_FRAME_WIDTH = 0x1, + CAP_FRAME_HEIGHT, + CAP_MBS_PER_FRAME, + CAP_MBS_PER_SECOND, + CAP_FRAMERATE, + CAP_SCALE_X, + CAP_SCALE_Y, + CAP_BITRATE, + CAP_CABAC_BITRATE, + CAP_BFRAME, + CAP_PEAKBITRATE, + CAP_HIER_P_NUM_ENH_LAYERS, + CAP_LTR_COUNT, + CAP_SECURE_OUTPUT2_THRESHOLD, + CAP_HIER_B_NUM_ENH_LAYERS, + CAP_LCU_SIZE, + CAP_HIER_P_HYBRID_NUM_ENH_LAYERS, + CAP_MBS_PER_SECOND_POWER_SAVE, + CAP_EXTRADATA, + CAP_PROFILE, + CAP_LEVEL, + CAP_I_FRAME_QP, + CAP_P_FRAME_QP, + CAP_B_FRAME_QP, + CAP_RATE_CONTROL_MODES, + CAP_BLUR_WIDTH, + CAP_BLUR_HEIGHT, + CAP_SLICE_BYTE, + CAP_SLICE_MB, + CAP_SECURE, + 
CAP_MAX_NUM_B_FRAMES, + CAP_MAX_VIDEOCORES, + CAP_MAX_WORKMODES, + CAP_UBWC_CR_STATS, + CAP_SECURE_FRAME_WIDTH, + CAP_SECURE_FRAME_HEIGHT, + CAP_SECURE_MBS_PER_FRAME, + CAP_SECURE_BITRATE, + CAP_BATCH_MAX_MB_PER_FRAME, + CAP_BATCH_MAX_FPS, + CAP_LOSSLESS_FRAME_WIDTH, + CAP_LOSSLESS_FRAME_HEIGHT, + CAP_LOSSLESS_MBS_PER_FRAME, + CAP_ALLINTRA_MAX_FPS, + CAP_HEVC_IMAGE_FRAME_WIDTH, + CAP_HEVC_IMAGE_FRAME_HEIGHT, + CAP_HEIC_IMAGE_FRAME_WIDTH, + CAP_HEIC_IMAGE_FRAME_HEIGHT, + CAP_H264_LEVEL, + CAP_HEVC_LEVEL, + CAP_MAX, +}; + +struct hal_capability_supported { + enum hal_capability capability_type; + u32 min; + u32 max; + u32 step_size; + u32 default_value; +}; + +struct hal_nal_stream_format_supported { + u32 nal_stream_format_supported; +}; + +struct hal_nal_stream_format_select { + u32 nal_stream_format_select; +}; + +struct hal_multi_view_format { + u32 views; + u32 rg_view_order[1]; +}; + +enum hal_buffer_layout_type { + HAL_BUFFER_LAYOUT_TOP_BOTTOM, + HAL_BUFFER_LAYOUT_SEQ, + HAL_UNUSED_BUFFER_LAYOUT = 0x10000000, +}; + +struct hal_codec_supported { + u32 decoder_codec_supported; + u32 encoder_codec_supported; +}; + +enum hal_core_id { + VIDC_CORE_ID_DEFAULT = 0, + VIDC_CORE_ID_1 = 1, /* 0b01 */ + VIDC_CORE_ID_2 = 2, /* 0b10 */ + VIDC_CORE_ID_3 = 3, /* 0b11 */ + VIDC_CORE_ID_UNUSED = 0x10000000, +}; + +enum vidc_resource_id { + VIDC_RESOURCE_NONE, + VIDC_RESOURCE_SYSCACHE, + VIDC_UNUSED_RESOURCE = 0x10000000, +}; + +struct vidc_resource_hdr { + enum vidc_resource_id resource_id; + void *resource_handle; +}; + +struct vidc_buffer_addr_info { + enum hal_buffer buffer_type; + u32 buffer_size; + u32 num_buffers; + u32 align_device_addr; + u32 extradata_addr; + u32 extradata_size; + u32 response_required; +}; + +/* Needs to be exactly the same as hfi_buffer_info */ +struct hal_buffer_info { + u32 buffer_addr; + u32 extra_data_addr; +}; + +struct vidc_frame_plane_config { + u32 left; + u32 top; + u32 width; + u32 height; + u32 stride; + u32 scan_lines; +}; + +struct 
vidc_uncompressed_frame_config { + struct vidc_frame_plane_config luma_plane; + struct vidc_frame_plane_config chroma_plane; +}; + +struct vidc_frame_data { + enum hal_buffer buffer_type; + u32 device_addr; + u32 extradata_addr; + int64_t timestamp; + u32 flags; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 input_tag; + u32 extradata_size; +}; + +struct hal_fw_info { + char version[VENUS_VERSION_LENGTH]; + phys_addr_t base_addr; + int register_base; + int register_size; + int irq; +}; + +enum hal_flush { + HAL_FLUSH_INPUT = BIT(0), + HAL_FLUSH_OUTPUT = BIT(1), + HAL_FLUSH_ALL = HAL_FLUSH_INPUT | HAL_FLUSH_OUTPUT, +}; + +enum hal_event_type { + HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES, + HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES, + HAL_EVENT_RELEASE_BUFFER_REFERENCE, + HAL_UNUSED_SEQCHG = 0x10000000, +}; + +enum buffer_mode_type { + HAL_BUFFER_MODE_DYNAMIC = 0x100, + HAL_BUFFER_MODE_STATIC = 0x001, +}; + +struct hal_buffer_alloc_mode { + enum hal_buffer buffer_type; + enum buffer_mode_type buffer_mode; +}; + +enum ltr_mode { + HAL_LTR_MODE_DISABLE, + HAL_LTR_MODE_MANUAL, +}; + +struct buffer_requirements { + struct hal_buffer_requirements buffer[HAL_BUFFER_MAX]; +}; + +struct hal_conceal_color { + u32 conceal_color_8bit; + u32 conceal_color_10bit; +}; + +union hal_get_property { + struct hal_batch_info batch_info; + struct hal_uncompressed_format_supported uncompressed_format_supported; + struct hal_interlace_format_supported interlace_format_supported; + struct hal_nal_stream_format_supported nal_stream_format_supported; + struct hal_nal_stream_format_select nal_stream_format_select; + struct hal_multi_view_format multi_view_format; + struct hal_buffer_info buffer_info; + struct hal_buffer_alloc_mode buffer_alloc_mode; + struct buffer_requirements buf_req; + struct hal_conceal_color conceal_color; +}; + +/* HAL Response */ +#define IS_HAL_SYS_CMD(cmd) ((cmd) >= HAL_SYS_INIT_DONE && \ + (cmd) <= HAL_SYS_ERROR) +#define IS_HAL_SESSION_CMD(cmd) ((cmd) >= 
HAL_SESSION_EVENT_CHANGE && \ + (cmd) <= HAL_SESSION_ERROR) +enum hal_command_response { + /* SYSTEM COMMANDS_DONE*/ + HAL_SYS_INIT_DONE, + HAL_SYS_SET_RESOURCE_DONE, + HAL_SYS_RELEASE_RESOURCE_DONE, + HAL_SYS_PC_PREP_DONE, + HAL_SYS_PING_ACK, + HAL_SYS_DEBUG, + HAL_SYS_WATCHDOG_TIMEOUT, + HAL_SYS_ERROR, + /* SESSION COMMANDS_DONE */ + HAL_SESSION_EVENT_CHANGE, + HAL_SESSION_LOAD_RESOURCE_DONE, + HAL_SESSION_INIT_DONE, + HAL_SESSION_END_DONE, + HAL_SESSION_ABORT_DONE, + HAL_SESSION_START_DONE, + HAL_SESSION_STOP_DONE, + HAL_SESSION_ETB_DONE, + HAL_SESSION_FTB_DONE, + HAL_SESSION_FLUSH_DONE, + HAL_SESSION_SUSPEND_DONE, + HAL_SESSION_RESUME_DONE, + HAL_SESSION_SET_PROP_DONE, + HAL_SESSION_GET_PROP_DONE, + HAL_SESSION_RELEASE_BUFFER_DONE, + HAL_SESSION_RELEASE_RESOURCE_DONE, + HAL_SESSION_PROPERTY_INFO, + HAL_SESSION_ERROR, + HAL_RESPONSE_UNUSED = 0x10000000, +}; + +struct msm_vidc_ubwc_stats { + u32 is_valid; + u32 worst_cr; + u32 worst_cf; +}; + +struct ubwc_cr_stats_info_type { + u32 cr_stats_info0; + u32 cr_stats_info1; + u32 cr_stats_info2; + u32 cr_stats_info3; + u32 cr_stats_info4; + u32 cr_stats_info5; + u32 cr_stats_info6; +}; + +struct recon_stats_type { + u32 buffer_index; + u32 complexity_number; + struct ubwc_cr_stats_info_type ubwc_stats_info; +}; + +struct vidc_hal_ebd { + u32 timestamp_hi; + u32 timestamp_lo; + u32 flags; + enum vidc_status status; + u32 input_tag; + u32 stats; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 picture_type; + struct recon_stats_type recon_stats; + u32 packet_buffer; + u32 extra_data_buffer; +}; + +struct vidc_hal_fbd { + u32 stream_id; + u32 view_id; + u32 timestamp_hi; + u32 timestamp_lo; + u32 flags1; + u32 stats; + u32 alloc_len1; + u32 filled_len1; + u32 offset1; + u32 frame_width; + u32 frame_height; + u32 start_x_coord; + u32 start_y_coord; + u32 input_tag; + u32 input_tag2; + u32 picture_type; + u32 packet_buffer1; + u32 extra_data_buffer; + u32 flags2; + u32 alloc_len2; + u32 filled_len2; + u32 offset2; + 
u32 packet_buffer2; + u32 flags3; + u32 alloc_len3; + u32 filled_len3; + u32 offset3; + u32 packet_buffer3; + enum hal_buffer buffer_type; + struct msm_vidc_ubwc_stats ubwc_cr_stat; +}; + +struct msm_vidc_capability { + enum hal_domain domain; + enum hal_video_codec codec; + struct hal_capability_supported cap[CAP_MAX]; +}; + +struct vidc_hal_sys_init_done { + u32 dec_codec_supported; + u32 enc_codec_supported; + u32 max_sessions_supported; +}; + +struct msm_vidc_cb_cmd_done { + u32 device_id; + void *inst_id; + enum vidc_status status; + u32 size; + union { + struct vidc_resource_hdr resource_hdr; + struct vidc_buffer_addr_info buffer_addr_info; + struct vidc_frame_plane_config frame_plane_config; + struct vidc_uncompressed_frame_config uncompressed_frame_config; + struct vidc_frame_data frame_data; + struct vidc_hal_ebd ebd; + struct vidc_hal_fbd fbd; + struct vidc_hal_sys_init_done sys_init_done; + struct hal_buffer_info buffer_info; + union hal_get_property property; + enum hal_flush flush_type; + } data; +}; + +struct msm_vidc_cb_event { + u32 device_id; + void *inst_id; + enum vidc_status status; + u32 height; + u32 width; + int bit_depth; + u32 hal_event_type; + u32 packet_buffer; + u32 extra_data_buffer; + u32 pic_struct; + u32 colour_space; + u32 profile; + u32 level; + u32 entropy_mode; + u32 max_dpb_count; + u32 max_ref_frames; + u32 max_dec_buffering; + u32 max_reorder_frames; + u32 fw_min_cnt; +}; + +struct msm_vidc_cb_data_done { + u32 device_id; + void *inst_id; + enum vidc_status status; + u32 size; + union { + struct vidc_hal_ebd input_done; + struct vidc_hal_fbd output_done; + }; +}; + +struct msm_vidc_cb_info { + enum hal_command_response response_type; + union { + struct msm_vidc_cb_cmd_done cmd; + struct msm_vidc_cb_event event; + struct msm_vidc_cb_data_done data; + } response; +}; + +enum msm_vidc_hfi_type { + VIDC_HFI_VENUS, +}; + +enum msm_vidc_thermal_level { + VIDC_THERMAL_NORMAL = 0, + VIDC_THERMAL_LOW, + VIDC_THERMAL_HIGH, + 
VIDC_THERMAL_CRITICAL +}; + +enum msm_vidc_power_mode { + VIDC_POWER_NORMAL = 0, + VIDC_POWER_LOW, + VIDC_POWER_TURBO +}; + +struct hal_cmd_sys_get_property_packet { + u32 size; + u32 packet_type; + u32 num_properties; + u32 rg_property_data[1]; +}; + +struct hal_hdr10_pq_sei { + struct msm_vidc_mastering_display_colour_sei_payload disp_color_sei; + struct msm_vidc_content_light_level_sei_payload cll_sei; +}; + +struct hal_vbv_hdr_buf_size { + u32 vbv_hdr_buf_size; +}; + +#define call_hfi_op(q, op, ...) \ + (((q) && (q)->op) ? ((q)->op(__VA_ARGS__)) : 0) + +struct hfi_device { + void *hfi_device_data; + + /*Add function pointers for all the hfi functions below*/ + int (*core_init)(void *device); + int (*core_ping)(void *device, u32 sid); + int (*core_release)(void *device); + int (*core_trigger_ssr)(void *device, + enum hal_ssr_trigger_type ssr_type, u32 sub_client_id, + u32 test_addr); + int (*session_init)(void *device, void *inst_id, + enum hal_domain session_type, enum hal_video_codec codec_type, + void **new_session, u32 sid); + int (*session_end)(void *session); + int (*session_abort)(void *session); + int (*session_set_buffers)(void *sess, + struct vidc_buffer_addr_info *buffer_info); + int (*session_release_buffers)(void *sess, + struct vidc_buffer_addr_info *buffer_info); + int (*session_load_res)(void *sess); + int (*session_release_res)(void *sess); + int (*session_start)(void *sess); + int (*session_continue)(void *sess); + int (*session_stop)(void *sess); + int (*session_etb)(void *sess, struct vidc_frame_data *input_frame); + int (*session_ftb)(void *sess, struct vidc_frame_data *output_frame); + int (*session_process_batch)(void *sess, + int num_etbs, struct vidc_frame_data etbs[], + int num_ftbs, struct vidc_frame_data ftbs[]); + int (*session_get_buf_req)(void *sess); + int (*session_flush)(void *sess, enum hal_flush flush_mode); + int (*session_set_property)(void *sess, u32 ptype, + void *pdata, u32 size); + int (*session_pause)(void *sess); + int 
(*session_resume)(void *sess); + int (*scale_clocks)(void *dev, u32 freq, u32 sid); + int (*vote_bus)(void *dev, unsigned long bw_ddr, + unsigned long bw_llcc, u32 sid); + int (*get_fw_info)(void *dev, struct hal_fw_info *fw_info); + int (*session_clean)(void *sess); + int (*get_core_capabilities)(void *dev); + int (*suspend)(void *dev); + int (*flush_debug_queue)(void *dev); + int (*noc_error_info)(void *dev); + enum hal_default_properties (*get_default_properties)(void *dev); +}; + +typedef void (*hfi_cmd_response_callback) (enum hal_command_response cmd, + void *data); +typedef void (*msm_vidc_callback) (enum hal_command_response response, + void *callback); + +struct hfi_device *vidc_hfi_initialize(enum msm_vidc_hfi_type hfi_type, + u32 device_id, struct msm_vidc_platform_resources *res, + hfi_cmd_response_callback callback); +void vidc_hfi_deinitialize(enum msm_vidc_hfi_type hfi_type, + struct hfi_device *hdev); +u32 vidc_get_hfi_domain(enum hal_domain hal_domain, u32 sid); +u32 vidc_get_hfi_codec(enum hal_video_codec hal_codec, u32 sid); +#endif /*__VIDC_HFI_API_H__ */ diff --git a/techpack/video/msm/vidc/vidc_hfi_helper.h b/techpack/video/msm/vidc/vidc_hfi_helper.h new file mode 100644 index 000000000000..bf1d55a4f1ea --- /dev/null +++ b/techpack/video/msm/vidc/vidc_hfi_helper.h @@ -0,0 +1,1080 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __H_VIDC_HFI_HELPER_H__ +#define __H_VIDC_HFI_HELPER_H__ + +#include "vidc/media/msm_vidc_utils.h" + +#define HFI_COMMON_BASE (0) +#define HFI_OX_BASE (0x01000000) + +#define HFI_VIDEO_DOMAIN_ENCODER (HFI_COMMON_BASE + 0x1) +#define HFI_VIDEO_DOMAIN_DECODER (HFI_COMMON_BASE + 0x2) +#define HFI_VIDEO_DOMAIN_VPE (HFI_COMMON_BASE + 0x4) + +#define HFI_DOMAIN_BASE_COMMON (HFI_COMMON_BASE + 0) +#define HFI_DOMAIN_BASE_VDEC (HFI_COMMON_BASE + 0x01000000) +#define HFI_DOMAIN_BASE_VENC (HFI_COMMON_BASE + 0x02000000) +#define HFI_DOMAIN_BASE_VPE (HFI_COMMON_BASE + 0x03000000) + +#define HFI_VIDEO_ARCH_OX (HFI_COMMON_BASE + 0x1) + +#define HFI_ARCH_COMMON_OFFSET (0) +#define HFI_ARCH_OX_OFFSET (0x00200000) + +#define HFI_CMD_START_OFFSET (0x00010000) +#define HFI_MSG_START_OFFSET (0x00020000) + +#define HFI_ERR_NONE HFI_COMMON_BASE +#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1) +#define HFI_ERR_SYS_INVALID_PARAMETER (HFI_COMMON_BASE + 0x2) +#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x3) +#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x4) +#define HFI_ERR_SYS_MAX_SESSIONS_REACHED (HFI_COMMON_BASE + 0x5) +#define HFI_ERR_SYS_UNSUPPORTED_CODEC (HFI_COMMON_BASE + 0x6) +#define HFI_ERR_SYS_SESSION_IN_USE (HFI_COMMON_BASE + 0x7) +#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE (HFI_COMMON_BASE + 0x8) +#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN (HFI_COMMON_BASE + 0x9) +#define HFI_ERR_SYS_NOC_ERROR (HFI_COMMON_BASE + 0x11) +#define HFI_ERR_SESSION_FATAL (HFI_COMMON_BASE + 0x1001) +#define HFI_ERR_SESSION_INVALID_PARAMETER (HFI_COMMON_BASE + 0x1002) +#define HFI_ERR_SESSION_BAD_POINTER (HFI_COMMON_BASE + 0x1003) +#define HFI_ERR_SESSION_INVALID_SESSION_ID (HFI_COMMON_BASE + 0x1004) +#define HFI_ERR_SESSION_INVALID_STREAM_ID (HFI_COMMON_BASE + 0x1005) +#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION \ + (HFI_COMMON_BASE + 0x1006) +#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY (HFI_COMMON_BASE + 0x1007) + +#define 
HFI_ERR_SESSION_UNSUPPORTED_SETTING (HFI_COMMON_BASE + 0x1008) + +#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x1009) + +#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED \ + (HFI_COMMON_BASE + 0x100A) + +#define HFI_ERR_SESSION_STREAM_CORRUPT (HFI_COMMON_BASE + 0x100B) +#define HFI_ERR_SESSION_ENC_OVERFLOW (HFI_COMMON_BASE + 0x100C) +#define HFI_ERR_SESSION_UNSUPPORTED_STREAM (HFI_COMMON_BASE + 0x100D) +#define HFI_ERR_SESSION_CMDSIZE (HFI_COMMON_BASE + 0x100E) +#define HFI_ERR_SESSION_UNSUPPORT_CMD (HFI_COMMON_BASE + 0x100F) +#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE (HFI_COMMON_BASE + 0x1010) +#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL (HFI_COMMON_BASE + 0x1011) +#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR (HFI_COMMON_BASE + 0x1012) +#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED (HFI_COMMON_BASE + 0x1013) + +#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1) +#define HFI_EVENT_SESSION_ERROR (HFI_COMMON_BASE + 0x2) + +#define HFI_VIDEO_CODEC_H264 0x00000002 +#define HFI_VIDEO_CODEC_MPEG1 0x00000008 +#define HFI_VIDEO_CODEC_MPEG2 0x00000010 +#define HFI_VIDEO_CODEC_HEVC 0x00002000 +#define HFI_VIDEO_CODEC_VP9 0x00004000 + +#define HFI_PROFILE_UNKNOWN 0x00000000 +#define HFI_LEVEL_UNKNOWN 0x00000000 + +#define HFI_H264_PROFILE_BASELINE 0x00000001 +#define HFI_H264_PROFILE_MAIN 0x00000002 +#define HFI_H264_PROFILE_HIGH 0x00000004 +#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008 +#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010 +#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000020 +#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000040 + +#define HFI_LEVEL_UNKNOWN 0x00000000 +#define HFI_H264_LEVEL_1 0x00000001 +#define HFI_H264_LEVEL_1b 0x00000002 +#define HFI_H264_LEVEL_11 0x00000004 +#define HFI_H264_LEVEL_12 0x00000008 +#define HFI_H264_LEVEL_13 0x00000010 +#define HFI_H264_LEVEL_2 0x00000020 +#define HFI_H264_LEVEL_21 0x00000040 +#define HFI_H264_LEVEL_22 0x00000080 +#define HFI_H264_LEVEL_3 0x00000100 +#define HFI_H264_LEVEL_31 
0x00000200 +#define HFI_H264_LEVEL_32 0x00000400 +#define HFI_H264_LEVEL_4 0x00000800 +#define HFI_H264_LEVEL_41 0x00001000 +#define HFI_H264_LEVEL_42 0x00002000 +#define HFI_H264_LEVEL_5 0x00004000 +#define HFI_H264_LEVEL_51 0x00008000 +#define HFI_H264_LEVEL_52 0x00010000 +#define HFI_H264_LEVEL_6 0x00020000 +#define HFI_H264_LEVEL_61 0x00040000 +#define HFI_H264_LEVEL_62 0x00080000 + +#define HFI_MPEG2_PROFILE_SIMPLE 0x00000001 +#define HFI_MPEG2_PROFILE_MAIN 0x00000002 + +#define HFI_MPEG2_LEVEL_LL 0x00000001 +#define HFI_MPEG2_LEVEL_ML 0x00000002 +#define HFI_MPEG2_LEVEL_HL 0x00000004 + +#define HFI_VP9_PROFILE_P0 0x00000001 +#define HFI_VP9_PROFILE_P2_10B 0x00000004 + +#define HFI_VP9_LEVEL_1 0x00000001 +#define HFI_VP9_LEVEL_11 0x00000002 +#define HFI_VP9_LEVEL_2 0x00000004 +#define HFI_VP9_LEVEL_21 0x00000008 +#define HFI_VP9_LEVEL_3 0x00000010 +#define HFI_VP9_LEVEL_31 0x00000020 +#define HFI_VP9_LEVEL_4 0x00000040 +#define HFI_VP9_LEVEL_41 0x00000080 +#define HFI_VP9_LEVEL_5 0x00000100 +#define HFI_VP9_LEVEL_51 0x00000200 +#define HFI_VP9_LEVEL_6 0x00000400 +#define HFI_VP9_LEVEL_61 0x00000800 + +#define HFI_HEVC_PROFILE_MAIN 0x00000001 +#define HFI_HEVC_PROFILE_MAIN10 0x00000002 +#define HFI_HEVC_PROFILE_MAIN_STILL_PIC 0x00000004 + +#define HFI_HEVC_LEVEL_1 0x00000001 +#define HFI_HEVC_LEVEL_2 0x00000002 +#define HFI_HEVC_LEVEL_21 0x00000004 +#define HFI_HEVC_LEVEL_3 0x00000008 +#define HFI_HEVC_LEVEL_31 0x00000010 +#define HFI_HEVC_LEVEL_4 0x00000020 +#define HFI_HEVC_LEVEL_41 0x00000040 +#define HFI_HEVC_LEVEL_5 0x00000080 +#define HFI_HEVC_LEVEL_51 0x00000100 +#define HFI_HEVC_LEVEL_52 0x00000200 +#define HFI_HEVC_LEVEL_6 0x00000400 +#define HFI_HEVC_LEVEL_61 0x00000800 +#define HFI_HEVC_LEVEL_62 0x00001000 + +#define HFI_HEVC_TIER_MAIN 0x1 +#define HFI_HEVC_TIER_HIGH 0x2 + +#define HFI_TME_PROFILE_DEFAULT 0x00000001 +#define HFI_TME_PROFILE_FRC 0x00000002 +#define HFI_TME_PROFILE_ASW 0x00000004 +#define HFI_TME_PROFILE_DFS_BOKEH 0x00000008 + +#define 
HFI_TME_LEVEL_INTEGER 0x00000001 + +#define HFI_BUFFER_INPUT (HFI_COMMON_BASE + 0x1) +#define HFI_BUFFER_OUTPUT (HFI_COMMON_BASE + 0x2) +#define HFI_BUFFER_OUTPUT2 (HFI_COMMON_BASE + 0x3) +#define HFI_BUFFER_INTERNAL_PERSIST (HFI_COMMON_BASE + 0x4) +#define HFI_BUFFER_INTERNAL_PERSIST_1 (HFI_COMMON_BASE + 0x5) +#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH (HFI_COMMON_BASE + 0x6) +#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1 (HFI_COMMON_BASE + 0x7) +#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2 (HFI_COMMON_BASE + 0x8) +#define HFI_BUFFER_COMMON_INTERNAL_RECON (HFI_COMMON_BASE + 0x9) +#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_COMMON_BASE + 0xA) +#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_COMMON_BASE + 0xB) +#define HFI_BUFFER_EXTRADATA_INPUT (HFI_COMMON_BASE + 0xC) + +#define HFI_BITDEPTH_8 (HFI_COMMON_BASE + 0x0) +#define HFI_BITDEPTH_9 (HFI_COMMON_BASE + 0x1) +#define HFI_BITDEPTH_10 (HFI_COMMON_BASE + 0x2) + +#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1 +#define HFI_VENC_PERFMODE_POWER_SAVE 0x2 + +#define HFI_WORKMODE_1 (HFI_COMMON_BASE + 0x1) +#define HFI_WORKMODE_2 (HFI_COMMON_BASE + 0x2) + +struct hfi_buffer_info { + u32 buffer_addr; + u32 extra_data_addr; +}; + +#define HFI_PROPERTY_SYS_COMMON_START \ + (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000) +#define HFI_PROPERTY_SYS_DEBUG_CONFIG \ + (HFI_PROPERTY_SYS_COMMON_START + 0x001) +#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO \ + (HFI_PROPERTY_SYS_COMMON_START + 0x002) +#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ \ + (HFI_PROPERTY_SYS_COMMON_START + 0x003) +#define HFI_PROPERTY_SYS_IDLE_INDICATOR \ + (HFI_PROPERTY_SYS_COMMON_START + 0x004) +#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL \ + (HFI_PROPERTY_SYS_COMMON_START + 0x005) +#define HFI_PROPERTY_SYS_IMAGE_VERSION \ + (HFI_PROPERTY_SYS_COMMON_START + 0x006) +#define HFI_PROPERTY_SYS_CONFIG_COVERAGE \ + (HFI_PROPERTY_SYS_COMMON_START + 0x007) +#define HFI_PROPERTY_SYS_UBWC_CONFIG \ + (HFI_PROPERTY_SYS_COMMON_START + 0x008) + +#define 
HFI_PROPERTY_PARAM_COMMON_START \ + (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000) +#define HFI_PROPERTY_PARAM_FRAME_SIZE \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x001) +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x002) +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x003) +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x004) +#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x005) +#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x006) +#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x007) +#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x008) +#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x009) +#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x00A) +#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x00B) +#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x00C) +#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x00E) +#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x010) +#define HFI_PROPERTY_PARAM_SECURE_SESSION \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x011) +#define HFI_PROPERTY_PARAM_WORK_MODE \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x015) +#define HFI_PROPERTY_TME_VERSION_SUPPORTED \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x016) +#define HFI_PROPERTY_PARAM_WORK_ROUTE \ + (HFI_PROPERTY_PARAM_COMMON_START + 0x017) + +#define HFI_PROPERTY_CONFIG_COMMON_START \ + (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000) +#define HFI_PROPERTY_CONFIG_FRAME_RATE \ + (HFI_PROPERTY_CONFIG_COMMON_START + 0x001) +#define 
HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE \ + (HFI_PROPERTY_CONFIG_COMMON_START + 0x002) +#define HFI_PROPERTY_CONFIG_OPERATING_RATE \ + (HFI_PROPERTY_CONFIG_COMMON_START + 0x003) + +#define HFI_PROPERTY_PARAM_VDEC_COMMON_START \ + (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000) +#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM \ + (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001) +#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR \ + (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x002) +#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH \ + (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x007) +#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT \ + (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x009) +#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE \ + (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x00A) + + +#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START \ + (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000) + +#define HFI_PROPERTY_PARAM_VENC_COMMON_START \ + (HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000) +#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x001) +#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x002) +#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003) +#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004) +#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009) +#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C) +#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D) +#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E) +#define HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F) +#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010) 
/*
 * HFI property IDs, capability IDs, and packet/payload layouts used by the
 * msm video driver.
 *
 * NOTE(review): the numeric values and struct layouts below appear to form a
 * binary interface with the video firmware (see HFI_VENUS_* / fw_min_cnt
 * below) — do not renumber, reorder, or repack anything here.  The trailing
 * rg_*[1] / str_*[1] arrays are the pre-C99 "size-1 array" idiom for a
 * variable-length tail; the element count travels in the preceding count
 * field.  Gaps in ID numbering are IDs not defined in this header.
 */

/* Encoder (VENC) session parameter property IDs. */
#define HFI_PROPERTY_PARAM_VENC_H264_SPS_ID \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015)
#define HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x016)
#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x017)
#define HFI_PROPERTY_PARAM_VENC_NUMREF \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x018)
#define HFI_PROPERTY_PARAM_VENC_LTRMODE \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
#define HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
#define HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
#define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x023)
#define HFI_PROPERTY_PARAM_VENC_H264_8X8_TRANSFORM \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x025)
#define HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x026)
#define HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x027)
#define HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x02C)
#define HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x02F)
#define HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x031)
#define HFI_PROPERTY_PARAM_VENC_IFRAMESIZE \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x034)
#define HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x035)
#define HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x036)
#define HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x037)
#define HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x038)
#define HFI_PROPERTY_PARAM_VENC_LOSSLESS_ENCODING \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x039)
#define HFI_PROPERTY_PARAM_HEVC_PPS_CB_CR_OFFSET \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x040)
#define HFI_PROPERTY_PARAM_VENC_BITRATE_BOOST \
	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x041)

/* Encoder (VENC) runtime-configurable property IDs. */
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x001)
#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x002)
#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x008)
#define HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x009)
#define HFI_PROPERTY_CONFIG_VENC_USELTRFRAME \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00A)
#define HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00B)
#define HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00D)
#define HFI_PROPERTY_CONFIG_VENC_PERF_MODE \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00E)
#define HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00F)
#define HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x010)
#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012)
#define HFI_PROPERTY_CONFIG_HEIC_FRAME_CROP_INFO \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x013)
#define HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x014)
#define HFI_PROPERTY_CONFIG_HEIC_GRID_ENABLE \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x015)
#define HFI_PROPERTY_CONFIG_CVP_SKIP_RATIO \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x016)
#define HFI_PROPERTY_CONFIG_VENC_AUTO_FRAME_RATE \
	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x017)

/* VPE session parameter property IDs. */
#define HFI_PROPERTY_PARAM_VPE_COMMON_START \
	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
#define HFI_PROPERTY_PARAM_VPE_ROTATION \
	(HFI_PROPERTY_PARAM_VPE_COMMON_START + 0x001)
#define HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION \
	(HFI_PROPERTY_PARAM_VPE_COMMON_START + 0x002)

/* VPE runtime-configurable property IDs. */
#define HFI_PROPERTY_CONFIG_VPE_COMMON_START \
	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)

#define HFI_PROPERTY_CONFIG_VPE_FLIP \
	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)

/* ---- Property payload structures (one per property above) ---- */

struct hfi_cvp_skip_ratio {
	u32 cvp_skip_ratio;
};

struct hfi_pic_struct {
	u32 progressive_only;
};

struct hfi_bitrate {
	u32 bit_rate;
	u32 layer_id;
};

struct hfi_colour_space {
	u32 colour_space;
};

/* Capability item IDs (note: some values use 0X hex prefix; kept as-is). */
#define HFI_CAPABILITY_FRAME_WIDTH			(HFI_COMMON_BASE + 0x1)
#define HFI_CAPABILITY_FRAME_HEIGHT			(HFI_COMMON_BASE + 0x2)
#define HFI_CAPABILITY_MBS_PER_FRAME			(HFI_COMMON_BASE + 0x3)
#define HFI_CAPABILITY_MBS_PER_SECOND			(HFI_COMMON_BASE + 0x4)
#define HFI_CAPABILITY_FRAMERATE			(HFI_COMMON_BASE + 0x5)
#define HFI_CAPABILITY_SCALE_X				(HFI_COMMON_BASE + 0x6)
#define HFI_CAPABILITY_SCALE_Y				(HFI_COMMON_BASE + 0x7)
#define HFI_CAPABILITY_BITRATE				(HFI_COMMON_BASE + 0x8)
#define HFI_CAPABILITY_BFRAME				(HFI_COMMON_BASE + 0x9)
#define HFI_CAPABILITY_PEAKBITRATE			(HFI_COMMON_BASE + 0xa)
#define HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS		(HFI_COMMON_BASE + 0x10)
#define HFI_CAPABILITY_ENC_LTR_COUNT			(HFI_COMMON_BASE + 0x11)
#define HFI_CAPABILITY_CP_OUTPUT2_THRESH		(HFI_COMMON_BASE + 0x12)
#define HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS		(HFI_COMMON_BASE + 0x13)
#define HFI_CAPABILITY_LCU_SIZE				(HFI_COMMON_BASE + 0x14)
#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS	(HFI_COMMON_BASE + 0x15)
#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE		(HFI_COMMON_BASE + 0x16)
#define HFI_CAPABILITY_EXTRADATA			(HFI_COMMON_BASE + 0X17)
#define HFI_CAPABILITY_PROFILE				(HFI_COMMON_BASE + 0X18)
#define HFI_CAPABILITY_LEVEL				(HFI_COMMON_BASE + 0X19)
#define HFI_CAPABILITY_I_FRAME_QP			(HFI_COMMON_BASE + 0X20)
#define HFI_CAPABILITY_P_FRAME_QP			(HFI_COMMON_BASE + 0X21)
#define HFI_CAPABILITY_B_FRAME_QP			(HFI_COMMON_BASE + 0X22)
#define HFI_CAPABILITY_RATE_CONTROL_MODES		(HFI_COMMON_BASE + 0X23)
#define HFI_CAPABILITY_BLUR_WIDTH			(HFI_COMMON_BASE + 0X24)
#define HFI_CAPABILITY_BLUR_HEIGHT			(HFI_COMMON_BASE + 0X25)
#define HFI_CAPABILITY_SLICE_DELIVERY_MODES		(HFI_COMMON_BASE + 0X26)
#define HFI_CAPABILITY_SLICE_BYTE			(HFI_COMMON_BASE + 0X27)
#define HFI_CAPABILITY_SLICE_MB				(HFI_COMMON_BASE + 0X28)
#define HFI_CAPABILITY_SECURE				(HFI_COMMON_BASE + 0X29)
#define HFI_CAPABILITY_MAX_NUM_B_FRAMES			(HFI_COMMON_BASE + 0X2A)
#define HFI_CAPABILITY_MAX_VIDEOCORES			(HFI_COMMON_BASE + 0X2B)
#define HFI_CAPABILITY_MAX_WORKMODES			(HFI_COMMON_BASE + 0X2C)
#define HFI_CAPABILITY_UBWC_CR_STATS			(HFI_COMMON_BASE + 0X2D)
#define HFI_CAPABILITY_MAX_WORKROUTES			(HFI_COMMON_BASE + 0X31)
#define HFI_CAPABILITY_CQ_QUALITY_LEVEL			(HFI_COMMON_BASE + 0X32)


/* Debug message severity levels — power-of-two values, usable as a bitmask. */
#define HFI_DEBUG_MSG_LOW					0x00000001
#define HFI_DEBUG_MSG_MEDIUM					0x00000002
#define HFI_DEBUG_MSG_HIGH					0x00000004
#define HFI_DEBUG_MSG_ERROR					0x00000008
#define HFI_DEBUG_MSG_FATAL					0x00000010
#define HFI_DEBUG_MSG_PERF					0x00000020

/* Debug output routing modes. */
#define HFI_DEBUG_MODE_QUEUE					0x00000001
#define HFI_DEBUG_MODE_QDSS					0x00000002

struct hfi_debug_config {
	u32 debug_config;	/* HFI_DEBUG_MSG_* mask */
	u32 debug_mode;		/* HFI_DEBUG_MODE_* */
};

/* Generic boolean property payload. */
struct hfi_enable {
	u32 enable;
};

/* H.264 deblocking-filter modes. */
#define HFI_H264_DB_MODE_DISABLE			(HFI_COMMON_BASE + 0x1)
#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY \
	(HFI_COMMON_BASE + 0x2)
#define HFI_H264_DB_MODE_ALL_BOUNDARY			(HFI_COMMON_BASE + 0x3)

struct hfi_h264_db_control {
	u32 mode;		/* HFI_H264_DB_MODE_* */
	u32 slice_alpha_offset;
	u32 slice_beta_offset;
};

/* H.264 entropy-coding selection and CABAC model. */
#define HFI_H264_ENTROPY_CAVLC				(HFI_COMMON_BASE + 0x1)
#define HFI_H264_ENTROPY_CABAC				(HFI_COMMON_BASE + 0x2)

#define HFI_H264_CABAC_MODEL_0				(HFI_COMMON_BASE + 0x1)
#define HFI_H264_CABAC_MODEL_1				(HFI_COMMON_BASE + 0x2)
#define HFI_H264_CABAC_MODEL_2				(HFI_COMMON_BASE + 0x3)

struct hfi_h264_entropy_control {
	u32 entropy_mode;	/* HFI_H264_ENTROPY_* */
	u32 cabac_model;	/* HFI_H264_CABAC_MODEL_* */
};

struct hfi_frame_rate {
	u32 buffer_type;
	u32 frame_rate;
};

struct hfi_heic_frame_quality {
	u32 frame_quality;
	u32 reserved[3];
};

struct hfi_heic_grid_enable {
	u32 grid_enable;
};

struct hfi_operating_rate {
	u32 operating_rate;
};

struct hfi_chroma_qp_offset {
	u32 chroma_offset;
	u32 reserved;
};

/* Intra-refresh modes (note: 0x3/0x4 intentionally not defined here). */
#define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
#define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)

struct hfi_intra_refresh {
	u32 mode;		/* HFI_INTRA_REFRESH_* */
	u32 mbs;
};

struct hfi_idr_period {
	u32 idr_period;
};

struct hfi_vpe_rotation_type {
	u32 rotation;		/* HFI_ROTATE_* (defined below) */
	u32 flip;		/* HFI_FLIP_* (defined below) */
};

struct hfi_conceal_color {
	u32 conceal_color_8bit;
	u32 conceal_color_10bit;
};

struct hfi_intra_period {
	u32 pframes;
	u32 bframes;
};

struct hfi_adaptive_p_b_intra_period {
	u32 nframes;
};

struct hfi_multi_stream {
	u32 buffer_type;
	u32 enable;
};

/* Multi-slice modes. */
#define HFI_MULTI_SLICE_OFF				(HFI_COMMON_BASE + 0x1)
#define HFI_MULTI_SLICE_BY_MB_COUNT			(HFI_COMMON_BASE + 0x2)
#define HFI_MULTI_SLICE_BY_BYTE_COUNT			(HFI_COMMON_BASE + 0x3)

struct hfi_multi_slice_control {
	u32 multi_slice;	/* HFI_MULTI_SLICE_* */
	u32 slice_size;
};

/* NAL stream framing formats — power-of-two values, usable as a bitmask. */
#define HFI_NAL_FORMAT_STARTCODES			0x00000001
#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER		0x00000002
#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH			0x00000004
#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH			0x00000008
#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH			0x00000010

struct hfi_nal_stream_format_supported {
	u32 nal_stream_format_supported;	/* HFI_NAL_FORMAT_* mask */
};

struct hfi_nal_stream_format_select {
	u32 nal_stream_format_select;		/* one HFI_NAL_FORMAT_* value */
};

/* Picture-type flags (bitmask) plus special frame markers. */
#define HFI_PICTURE_TYPE_I				0x01
#define HFI_PICTURE_TYPE_P				0x02
#define HFI_PICTURE_TYPE_B				0x04
#define HFI_PICTURE_TYPE_IDR				0x08
#define HFI_PICTURE_TYPE_CRA				0x10
#define HFI_FRAME_NOTCODED				0x7F002000
#define HFI_FRAME_YUV					0x7F004000
#define HFI_UNUSED_PICT					0x10000000

struct hfi_profile_level {
	u32 profile;
	u32 level;
};

struct hfi_dpb_counts {
	u32 max_dpb_count;
	u32 max_ref_frames;
	u32 max_dec_buffering;
	u32 max_reorder_frames;
	u32 fw_min_cnt;
};

struct hfi_profile_level_supported {
	u32 profile_count;
	struct hfi_profile_level rg_profile_level[1];	/* profile_count entries */
};

struct hfi_quality_vs_speed {
	u32 quality_vs_speed;
};

struct hfi_quantization {
	u32 qp_packed;
	u32 layer_id;
	u32 enable;
	u32 reserved[3];
};

struct hfi_quantization_range {
	struct hfi_quantization min_qp;
	struct hfi_quantization max_qp;
	u32 reserved[4];
};

/* Long-term-reference (LTR) modes. */
#define HFI_LTR_MODE_DISABLE	0x0
#define HFI_LTR_MODE_MANUAL	0x1

struct hfi_ltr_mode {
	u32 ltr_mode;		/* HFI_LTR_MODE_* */
	u32 ltr_count;
	u32 trust_mode;
};

struct hfi_ltr_use {
	u32 ref_ltr;
	u32 use_constrnt;
	u32 frames;
};

struct hfi_ltr_mark {
	u32 mark_frame;
};

struct hfi_frame_size {
	u32 buffer_type;
	u32 width;
	u32 height;
};

struct hfi_videocores_usage_type {
	u32 video_core_enable_mask;
};

struct hfi_video_work_mode {
	u32 video_work_mode;
};

struct hfi_video_work_route {
	u32 video_work_route;
};

struct hfi_video_signal_metadata {
	u32 enable;
	u32 video_format;
	u32 video_full_range;
	u32 color_description;
	u32 color_primaries;
	u32 transfer_characteristics;
	u32 matrix_coeffs;
};

struct hfi_vui_timing_info {
	u32 enable;
	u32 fixed_frame_rate;
	u32 time_scale;
};

struct hfi_bit_depth {
	u32 buffer_type;
	u32 bit_depth;
};

/* Base Offset for UBWC color formats */
#define HFI_COLOR_FORMAT_UBWC_BASE (0x8000)

/* Base Offset for 10-bit color formats */
#define HFI_COLOR_FORMAT_10_BIT_BASE (0x4000)

#define HFI_COLOR_FORMAT_NV12				(HFI_COMMON_BASE + 0x2)
#define HFI_COLOR_FORMAT_NV21				(HFI_COMMON_BASE + 0x3)
#define HFI_COLOR_FORMAT_RGBA8888			(HFI_COMMON_BASE + 0x10)

/* 10-bit variants are the 8-bit ID plus the 10-bit base offset. */
#define HFI_COLOR_FORMAT_YUV420_TP10 \
	(HFI_COLOR_FORMAT_10_BIT_BASE + HFI_COLOR_FORMAT_NV12)
#define HFI_COLOR_FORMAT_P010 \
	(HFI_COLOR_FORMAT_10_BIT_BASE + HFI_COLOR_FORMAT_NV12 + 0x1)

/* UBWC (compressed) variants are the base ID plus the UBWC offset. */
#define HFI_COLOR_FORMAT_NV12_UBWC \
	(HFI_COLOR_FORMAT_UBWC_BASE + HFI_COLOR_FORMAT_NV12)

#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC \
	(HFI_COLOR_FORMAT_UBWC_BASE + HFI_COLOR_FORMAT_YUV420_TP10)

#define HFI_COLOR_FORMAT_RGBA8888_UBWC \
	(HFI_COLOR_FORMAT_UBWC_BASE + HFI_COLOR_FORMAT_RGBA8888)

/* Fixed sizes of the color-space-conversion coefficient arrays below. */
#define HFI_MAX_MATRIX_COEFFS 9
#define HFI_MAX_BIAS_COEFFS 3
#define HFI_MAX_LIMIT_COEFFS 6

struct hfi_uncompressed_format_select {
	u32 buffer_type;
	u32 format;
};

struct hfi_uncompressed_format_supported {
	u32 buffer_type;
	u32 format_entries;
	u32 rg_format_info[1];		/* format_entries entries */
};

struct hfi_uncompressed_plane_actual {
	u32 actual_stride;
	u32 actual_plane_buffer_height;
};

struct hfi_uncompressed_plane_actual_info {
	u32 buffer_type;
	u32 num_planes;
	struct hfi_uncompressed_plane_actual rg_plane_format[1];	/* num_planes */
};

struct hfi_uncompressed_plane_constraints {
	u32 stride_multiples;
	u32 max_stride;
	u32 min_plane_buffer_height_multiple;
	u32 buffer_alignment;
};

struct hfi_uncompressed_plane_info {
	u32 format;
	u32 num_planes;
	struct hfi_uncompressed_plane_constraints rg_plane_format[1];	/* num_planes */
};

struct hfi_vpe_color_space_conversion {
	u32 input_color_primaries;
	u32 custom_matrix_enabled;
	u32 csc_matrix[HFI_MAX_MATRIX_COEFFS];
	u32 csc_bias[HFI_MAX_BIAS_COEFFS];
	u32 csc_limit[HFI_MAX_LIMIT_COEFFS];
};

/* Rotation / flip values for struct hfi_vpe_rotation_type. */
#define HFI_ROTATE_NONE					(HFI_COMMON_BASE + 0x1)
#define HFI_ROTATE_90					(HFI_COMMON_BASE + 0x2)
#define HFI_ROTATE_180					(HFI_COMMON_BASE + 0x3)
#define HFI_ROTATE_270					(HFI_COMMON_BASE + 0x4)

#define HFI_FLIP_NONE					(HFI_COMMON_BASE + 0x1)
#define HFI_FLIP_HORIZONTAL				(HFI_COMMON_BASE + 0x2)
#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x4)

#define HFI_RESOURCE_SYSCACHE 0x00000002

struct hfi_resource_subcache_type {
	u32 size;
	u32 sc_id;
};

struct hfi_resource_syscache_info_type {
	u32 num_entries;
	struct hfi_resource_subcache_type rg_subcache_entries[1];	/* num_entries */
};

struct hfi_property_sys_image_version_info_type {
	u32 string_size;
	u8 str_image_version[1];	/* string_size bytes */
};

struct hfi_vbv_hrd_bufsize {
	u32 buffer_size;
};

struct hfi_codec_mask_supported {
	u32 codecs;
	u32 video_domains;
};

struct hfi_aspect_ratio {
	u32 aspect_width;
	u32 aspect_height;
};

/* System-level command IDs. */
#define HFI_CMD_SYS_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
	+ 0x0000)
#define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
#define HFI_CMD_SYS_PC_PREP		(HFI_CMD_SYS_COMMON_START + 0x002)
#define HFI_CMD_SYS_SET_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x003)
#define HFI_CMD_SYS_RELEASE_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x004)
#define HFI_CMD_SYS_SET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x005)
#define HFI_CMD_SYS_GET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x006)
#define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
#define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
#define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
#define HFI_CMD_SYS_TEST_START		(HFI_CMD_SYS_COMMON_START + 0x100)

/* Session-level command IDs. */
#define HFI_CMD_SESSION_COMMON_START \
	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + \
	HFI_CMD_START_OFFSET + 0x1000)
#define HFI_CMD_SESSION_SET_PROPERTY \
	(HFI_CMD_SESSION_COMMON_START + 0x001)
#define HFI_CMD_SESSION_SET_BUFFERS \
	(HFI_CMD_SESSION_COMMON_START + 0x002)
#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER \
	(HFI_CMD_SESSION_COMMON_START + 0x003)

/* System-level message IDs (0x5 intentionally not defined here). */
#define HFI_MSG_SYS_COMMON_START \
	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + \
	HFI_MSG_START_OFFSET + 0x0000)
#define HFI_MSG_SYS_INIT_DONE		(HFI_MSG_SYS_COMMON_START + 0x1)
#define HFI_MSG_SYS_PC_PREP_DONE	(HFI_MSG_SYS_COMMON_START + 0x2)
#define HFI_MSG_SYS_RELEASE_RESOURCE	(HFI_MSG_SYS_COMMON_START + 0x3)
#define HFI_MSG_SYS_DEBUG		(HFI_MSG_SYS_COMMON_START + 0x4)
#define HFI_MSG_SYS_SESSION_INIT_DONE	(HFI_MSG_SYS_COMMON_START + 0x6)
#define HFI_MSG_SYS_SESSION_END_DONE	(HFI_MSG_SYS_COMMON_START + 0x7)
#define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_COMMON_START + 0x8)
#define HFI_MSG_SYS_COV			(HFI_MSG_SYS_COMMON_START + 0x9)
#define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_COMMON_START + 0xA)
/* NOTE(review): unlike its neighbours this is based on
 * HFI_MSG_SESSION_OX_START (defined elsewhere in this header). */
#define HFI_MSG_SESSION_SYNC_DONE	(HFI_MSG_SESSION_OX_START + 0xD)

/* Session-level message IDs. */
#define HFI_MSG_SESSION_COMMON_START \
	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + \
	HFI_MSG_START_OFFSET + 0x1000)
#define HFI_MSG_EVENT_NOTIFY	(HFI_MSG_SESSION_COMMON_START + 0x1)
#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE \
	(HFI_MSG_SESSION_COMMON_START + 0x2)

/* SSR (subsystem restart) test command and trigger types. */
#define HFI_CMD_SYS_TEST_SSR	(HFI_CMD_SYS_TEST_START + 0x1)
#define HFI_TEST_SSR_SW_ERR_FATAL	0x1
#define HFI_TEST_SSR_SW_DIV_BY_ZERO	0x2
#define HFI_TEST_SSR_HW_WDOG_IRQ	0x3
#define HFI_TEST_SSR_NOC_ERROR		0x4
#define HFI_TEST_SSR_VCODEC_HUNG	0x5

/* ---- Packet layouts.  Every command/message packet starts with its total
 * size in bytes followed by its packet-type ID. ---- */

struct vidc_hal_cmd_pkt_hdr {
	u32 size;
	u32 packet_type;
};

struct vidc_hal_msg_pkt_hdr {
	u32 size;
	u32 packet;
};

struct vidc_hal_session_cmd_pkt {
	u32 size;
	u32 packet_type;
	u32 sid;		/* session id */
};

struct hfi_packet_header {
	u32 size;
	u32 packet_type;
};

struct hfi_cmd_sys_init_packet {
	u32 size;
	u32 packet_type;
	u32 arch_type;
};

struct hfi_cmd_sys_pc_prep_packet {
	u32 size;
	u32 packet_type;
};

struct hfi_cmd_sys_set_resource_packet {
	u32 size;
	u32 packet_type;
	u32 resource_handle;
	u32 resource_type;
	u32 rg_resource_data[1];	/* variable-length tail */
};

struct hfi_cmd_sys_release_resource_packet {
	u32 size;
	u32 packet_type;
	u32 resource_type;
	u32 resource_handle;
};

struct hfi_cmd_sys_set_property_packet {
	u32 size;
	u32 packet_type;
	u32 num_properties;
	u32 rg_property_data[1];	/* num_properties property TLVs */
};

struct hfi_cmd_sys_get_property_packet {
	u32 size;
	u32 packet_type;
	u32 num_properties;
	u32 rg_property_data[1];	/* num_properties property IDs */
};

struct hfi_cmd_sys_session_init_packet {
	u32 size;
	u32 packet_type;
	u32 sid;
	u32 session_domain;
	u32 session_codec;
};

struct hfi_cmd_sys_session_end_packet {
	u32 size;
	u32 packet_type;
	u32 sid;
};

struct hfi_cmd_sys_set_buffers_packet {
	u32 size;
	u32 packet_type;
	u32 buffer_type;
	u32 buffer_size;
	u32 num_buffers;
	u32 rg_buffer_addr[1];		/* num_buffers addresses */
};

struct hfi_cmd_sys_set_ubwc_config_packet_type {
	u32 size;
	u32 packet_type;
	/* Each override_* bit enables the correspondingly named field below. */
	struct {
		u32 max_channel_override : 1;
		u32 mal_length_override : 1;
		u32 hb_override : 1;
		u32 bank_swzl_level_override : 1;
		u32 bank_spreading_override : 1;
		u32 reserved : 27;
	} override_bit_info;
	u32 max_channels;
	u32 mal_length;
	u32 highest_bank_bit;
	u32 bank_swzl_level;
	u32 bank_spreading;
	u32 reserved[2];
};

struct hfi_cmd_session_set_property_packet {
	u32 size;
	u32 packet_type;
	u32 sid;
	u32 num_properties;
	u32 rg_property_data[1];	/* num_properties property TLVs */
};

struct hfi_cmd_session_set_buffers_packet {
	u32 size;
	u32 packet_type;
	u32 sid;
	u32 buffer_type;
	u32 buffer_size;
	u32 extra_data_size;
	u32 min_buffer_size;
	u32 num_buffers;
	u32 rg_buffer_info[1];		/* num_buffers entries */
};

struct hfi_buffer_mapping_type {
	u32 index;
	u32 device_addr;
	u32 size;
};

struct hfi_msg_event_notify_packet {
	u32 size;
	u32 packet_type;
	u32 sid;
	u32 event_id;
	u32 event_data1;
	u32 event_data2;
	u32 rg_ext_event_data[1];	/* variable-length tail */
};

struct hfi_msg_release_buffer_ref_event_packet {
	u32 packet_buffer;
	u32 extra_data_buffer;
	u32 output_tag;
};

struct hfi_msg_sys_init_done_packet {
	u32 size;
	u32 packet_type;
	u32 error_type;
	u32 num_properties;
	u32 rg_property_data[1];	/* num_properties property TLVs */
};

struct hfi_msg_sys_pc_prep_done_packet {
	u32 size;
	u32 packet_type;
	u32 error_type;
};

struct hfi_msg_sys_release_resource_done_packet {
	u32 size;
	u32 packet_type;
	u32 resource_handle;
	u32 error_type;
};

struct hfi_msg_sys_session_init_done_packet {
	u32 size;
	u32 packet_type;
	u32 sid;
	u32 error_type;
	u32 num_properties;
	u32 rg_property_data[1];	/* num_properties property TLVs */
};

struct hfi_msg_sys_session_end_done_packet {
	u32 size;
	u32 packet_type;
	u32 sid;
	u32 error_type;
};

struct hfi_msg_sys_debug_packet {
	u32 size;
	u32 packet_type;
	u32 msg_type;
	u32 msg_size;
	u32 time_stamp_hi;
	u32 time_stamp_lo;
	u8 rg_msg_data[1];		/* msg_size bytes */
};

struct hfi_msg_sys_coverage_packet {
	u32 size;
	u32 packet_type;
	u32 msg_size;
	u32 time_stamp_hi;
	u32 time_stamp_lo;
	u8 rg_msg_data[1];		/* msg_size bytes */
};

/* Shared-queue-table lifecycle states. */
enum HFI_VENUS_QTBL_STATUS {
	HFI_VENUS_QTBL_DISABLED = 0x00,
	HFI_VENUS_QTBL_ENABLED = 0x01,
	HFI_VENUS_QTBL_INITIALIZING = 0x02,
	HFI_VENUS_QTBL_DEINITIALIZING = 0x03
};

/* Controller initialization states. */
enum HFI_VENUS_CTRL_INIT_STATUS {
	HFI_VENUS_CTRL_NOT_INIT = 0x0,
	HFI_VENUS_CTRL_READY = 0x1,
	HFI_VENUS_CTRL_ERROR_FATAL = 0x2
};

/* Subsystem failure reason (SFR) buffer.
 * NOTE(review): bufSize breaks this file's snake_case convention but is
 * part of the established layout — do not rename. */
struct hfi_sfr_struct {
	u32 bufSize;
	u8 rg_data[1];			/* bufSize bytes */
};

struct hfi_cmd_sys_test_ssr_packet {
	u32 size;
	u32 packet_type;
	u32 trigger_type;		/* HFI_TEST_SSR_* */
	u32 rg_data[1];			/* variable-length tail */
};

struct hfi_hdr10_pq_sei {
	struct msm_vidc_mastering_display_colour_sei_payload mdisp_info;
	struct msm_vidc_content_light_level_sei_payload cll_info;
};

struct hfi_vbv_hrd_buf_size {
	u32 vbv_hrd_buf_size;
};

struct hfi_bitrate_boost_margin {
	u32 margin;
};

#endif