Merge tag 'LA.UM.9.14.r1-19500-LAHAINA.QSSI12.0' of https://git.codelinaro.org/clo/la/platform/vendor/opensource/display-drivers into android12-5.4-lahaina

"LA.UM.9.14.r1-19500-LAHAINA.QSSI12.0"

* tag 'LA.UM.9.14.r1-19500-LAHAINA.QSSI12.0' of https://git.codelinaro.org/clo/la/platform/vendor/opensource/display-drivers:
  disp: msm: use vzalloc for large allocations
  disp: msm: sde: Fix data width calculation when widebus is enabled
  drm: msm: call rsc hw_init after hibernation
  disp: msm: sde: remove redundant backlight update
  disp: msm: sde: take min ib votes from perf config
  disp: msm: sde: validate plane mode and gem obj flags
  disp: msm: dsi: fix compressed RGB101010 support
  disp: msm: sde: set parent to xo for link clks while entering suspend
  disp: msm: sde: while timing engine enabling poll for active region
  disp: msm: sde: fix null pointer dereference
  disp: msm: sde: set NOAUTOEN for sde irq to match with power event
  disp: msm: sde: always set CTL_x_UIDLE_ACTIVE register to "1"
  disp: msm: sde: move sde power event call into kms post init
  disp: msm: sde: fix RM poll timeouts during PM suspend/resume usecase
  disp: msm: sde: remove clearing cur_master in encoder enable function
  disp: msm: sde: cancel delayed_off_work before reinitialization
  disp: msm: sde: update TEAR_SYNC_WRCOUNT register before vsync counter
  disp: msm: sde: disable vsync counter before tear check update
  disp: msm: sde: disable vsync_in to update tear check
  disp: msm: sde: avoid tx wait during DMS for targets with dsc rev2
  disp: msm: sde: avoid irq enable/disable during modeset
  disp: msm: fix rsc static wakeup time calculation
  disp: msm: dsi: allocate DSI command buffer during bind
  disp: msm: sde: update uidle_db_updates in both enable/disable cases
  disp: msm: dsi: add API to handle PHY programming during 0p9 collapse
  disp: msm: sde: modify format specifier
  disp: msm: dsi: Clear slave dma status only for broadcast command
  disp: msm: sde: avoid CWB in power on commit
  disp: msm: sde: avoid sde irq enable or disable when sde irq not active
  disp: msm: dsi: remove early return from dma_cmd_wait_for_done
  disp: msm: sde: protect file private structure with mutex lock
  disp: msm: add support for twm entry
  disp: msm: sde: add twm mode sysfs mode
  disp: msm: sde: add sysfs node to give panel power state
  disp: msm: dsi: Support uncompressed rgb101010 format
  disp: msm: sde: avoid rsvp_nxt allocation for suspend commit
  disp: rotator: remove ubwc format support for rotator
  disp: msm: sde: add changes to allocate compatible cwb mixers in RM
  disp: msm: sde: add evt log in rsc timer calculation
  msm: disp: rotator: add ROT macros for logs
  disp: msm: dp: replace pr_err with DP_ERR
  disp: msm: dsi: Do not call devm_clk_put() with invalid clk
  disp: msm: sde: disable CWB crop after cwb session is ended
  disp: rotator: remove warning log from spin_lock
  disp: msm: sde: protect file private structure with mutex lock
  disp: msm: dsi: add support for ultra low power state
  disp: msm: sde: switch rsc state before CTL_PREPARE in dual display
  disp: msm: sde: add checks to avoid null pointer dereference
  drm: msm: dsi: Update DSI parser util to skip disabled child nodes
  disp: msm: qpic: fix kw issues in QPIC display driver
  disp: msm: dsi: Fix deadlock issue in debugfs_esd_trigger_check function

Change-Id: I4acda3b051e4306f0c1f1a99c9aa61dfeb99ef90
This commit is contained in:
Michael Bestas 2022-05-19 00:40:44 +03:00
commit e4409f8201
No known key found for this signature in database
GPG Key ID: CC95044519BE6669
62 changed files with 1153 additions and 272 deletions

View File

@ -3051,7 +3051,7 @@ static int dp_display_setup_colospace(struct dp_display *dp_display,
struct dp_display_private *dp;
if (!dp_display || !panel) {
pr_err("invalid input\n");
DP_ERR("invalid input\n");
return -EINVAL;
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
*/
#include <drm/drm_atomic_helper.h>
@ -336,7 +336,7 @@ int dp_connector_set_colorspace(struct drm_connector *connector,
sde_conn = to_sde_connector(connector);
if (!sde_conn->drv_panel) {
pr_err("invalid dp panel\n");
DP_ERR("invalid dp panel\n");
return -EINVAL;
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
*/
#include "dp_panel.h"
@ -2571,7 +2571,7 @@ static int dp_panel_set_colorspace(struct dp_panel *dp_panel,
struct dp_panel_private *panel;
if (!dp_panel) {
pr_err("invalid input\n");
DP_ERR("invalid input\n");
rc = -EINVAL;
goto end;
}

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/errno.h>
@ -293,6 +294,7 @@ static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
dsi_phy_hw_v4_0_cache_phy_timings;
phy->ops.set_continuous_clk = dsi_phy_hw_v4_0_set_continuous_clk;
phy->ops.commit_phy_timing = dsi_phy_hw_v4_0_commit_phy_timing;
phy->ops.phy_idle_off = dsi_phy_hw_v4_0_phy_idle_off;
}
/**

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DSI_CATALOG_H_
@ -125,6 +126,7 @@ void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy);
void dsi_phy_hw_v4_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable);
void dsi_phy_hw_v4_0_commit_phy_timing(struct dsi_phy_hw *phy,
struct dsi_phy_per_lane_cfgs *timing);
void dsi_phy_hw_v4_0_phy_idle_off(struct dsi_phy_hw *phy);
/* DSI controller common ops */
u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
@ -172,6 +173,16 @@ int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
{
int rc = 0;
if (!parent->byte_clk || !parent->pixel_clk) {
DSI_ERR("Invalid parent\n");
return -EINVAL;
}
if (!child->byte_clk || !child->pixel_clk) {
DSI_ERR("Invalid child\n");
return -EINVAL;
}
rc = clk_set_parent(child->byte_clk, parent->byte_clk);
if (rc) {
DSI_ERR("failed to set byte clk parent\n");

View File

@ -390,13 +390,6 @@ static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
dsi_hw_ops = dsi_ctrl->hw.ops;
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
/*
* This atomic state will be set if ISR has been triggered,
* so the wait is not needed.
*/
if (atomic_read(&dsi_ctrl->dma_irq_trig))
goto done;
ret = wait_for_completion_timeout(
&dsi_ctrl->irq_info.cmd_dma_done,
msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
@ -416,8 +409,8 @@ static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
DSI_SINT_CMD_MODE_DMA_DONE);
}
done:
dsi_ctrl->dma_wait_queued = false;
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_EXIT);
}
static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
@ -657,34 +650,34 @@ static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
struct dsi_link_hs_clk_info *hs_link = &ctrl->clk_info.hs_link_clks;
struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
if (core->mdp_core_clk)
if (!IS_ERR_OR_NULL(core->mdp_core_clk))
devm_clk_put(&ctrl->pdev->dev, core->mdp_core_clk);
if (core->iface_clk)
if (!IS_ERR_OR_NULL(core->iface_clk))
devm_clk_put(&ctrl->pdev->dev, core->iface_clk);
if (core->core_mmss_clk)
if (!IS_ERR_OR_NULL(core->core_mmss_clk))
devm_clk_put(&ctrl->pdev->dev, core->core_mmss_clk);
if (core->bus_clk)
if (!IS_ERR_OR_NULL(core->bus_clk))
devm_clk_put(&ctrl->pdev->dev, core->bus_clk);
if (core->mnoc_clk)
if (!IS_ERR_OR_NULL(core->mnoc_clk))
devm_clk_put(&ctrl->pdev->dev, core->mnoc_clk);
memset(core, 0x0, sizeof(*core));
if (hs_link->byte_clk)
if (!IS_ERR_OR_NULL(hs_link->byte_clk))
devm_clk_put(&ctrl->pdev->dev, hs_link->byte_clk);
if (hs_link->pixel_clk)
if (!IS_ERR_OR_NULL(hs_link->pixel_clk))
devm_clk_put(&ctrl->pdev->dev, hs_link->pixel_clk);
if (lp_link->esc_clk)
if (!IS_ERR_OR_NULL(lp_link->esc_clk))
devm_clk_put(&ctrl->pdev->dev, lp_link->esc_clk);
if (hs_link->byte_intf_clk)
if (!IS_ERR_OR_NULL(hs_link->byte_intf_clk))
devm_clk_put(&ctrl->pdev->dev, hs_link->byte_intf_clk);
memset(hs_link, 0x0, sizeof(*hs_link));
memset(lp_link, 0x0, sizeof(*lp_link));
if (rcg->byte_clk)
if (!IS_ERR_OR_NULL(rcg->byte_clk))
devm_clk_put(&ctrl->pdev->dev, rcg->byte_clk);
if (rcg->pixel_clk)
if (!IS_ERR_OR_NULL(rcg->pixel_clk))
devm_clk_put(&ctrl->pdev->dev, rcg->pixel_clk);
memset(rcg, 0x0, sizeof(*rcg));
@ -933,6 +926,9 @@ int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format)
case DSI_PIXEL_FORMAT_RGB888:
bpp = 24;
break;
case DSI_PIXEL_FORMAT_RGB101010:
bpp = 30;
break;
default:
bpp = 24;
break;
@ -1557,7 +1553,8 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
if (dsi_ctrl->dma_wait_queued)
dsi_ctrl_flush_cmd_dma_queue(dsi_ctrl);
if (!(*flags & DSI_CTRL_CMD_BROADCAST_MASTER))
if ((*flags & DSI_CTRL_CMD_BROADCAST) &&
(!(*flags & DSI_CTRL_CMD_BROADCAST_MASTER)))
dsi_ctrl_clear_slave_dma_status(dsi_ctrl, *flags);
if (*flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {

View File

@ -41,9 +41,9 @@ static bool dsi_compression_enabled(struct dsi_mode_info *mode)
/* Unsupported formats default to RGB888 */
static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4, 0x9 };
static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3, 0x4 };
/**
* dsi_split_link_setup() - setup dsi split link configurations
@ -421,10 +421,13 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
u32 reg = 0, offset = 0;
int pic_width = 0, this_frame_slices = 0, intf_ip_w = 0;
u32 pkt_per_line = 0, eol_byte_num = 0, bytes_in_slice = 0;
u32 bpp;
if (roi && (!roi->w || !roi->h))
return;
bpp = dsi_pixel_format_to_bpp(cfg->dst_format);
if (dsi_dsc_compression_enabled(mode)) {
struct msm_display_dsc_info dsc;
@ -458,11 +461,11 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
bytes_in_slice = vdc.bytes_in_slice;
} else if (roi) {
width_final = roi->w;
stride_final = roi->w * 3;
stride_final = DIV_ROUND_UP(roi->w * bpp, 8);
height_final = roi->h;
} else {
width_final = mode->h_active;
stride_final = mode->h_active * 3;
stride_final = DIV_ROUND_UP(mode->h_active * bpp, 8);
height_final = mode->v_active;
}
@ -579,7 +582,7 @@ void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl,
reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
reg |= (cfg->traffic_mode & 0x3) << 8;
reg |= (cfg->vc_id & 0x3);
reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
reg |= (video_mode_format_map[common_cfg->dst_format] & 0x7) << 4;
DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
reg = (common_cfg->swap_mode & 0x7) << 12;

View File

@ -39,6 +39,7 @@
* @DSI_PIXEL_FORMAT_RGB111:
* @DSI_PIXEL_FORMAT_RGB332:
* @DSI_PIXEL_FORMAT_RGB444:
* @DSI_PIXEL_FORMAT_RGB101010:
* @DSI_PIXEL_FORMAT_MAX:
*/
enum dsi_pixel_format {
@ -49,6 +50,7 @@ enum dsi_pixel_format {
DSI_PIXEL_FORMAT_RGB111,
DSI_PIXEL_FORMAT_RGB332,
DSI_PIXEL_FORMAT_RGB444,
DSI_PIXEL_FORMAT_RGB101010,
DSI_PIXEL_FORMAT_MAX
};
@ -740,6 +742,8 @@ static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
return 8;
case DSI_PIXEL_FORMAT_RGB444:
return 12;
case DSI_PIXEL_FORMAT_RGB101010:
return 30;
}
return 24;
}

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@ -1247,12 +1248,22 @@ int dsi_display_set_power(struct drm_connector *connector,
switch (power_mode) {
case SDE_MODE_DPMS_LP1:
if (display->panel->power_mode == SDE_MODE_DPMS_LP2) {
if (dsi_display_set_ulp_load(display, false) < 0)
DSI_WARN("failed to set load for lp1 state\n");
}
rc = dsi_panel_set_lp1(display->panel);
break;
case SDE_MODE_DPMS_LP2:
rc = dsi_panel_set_lp2(display->panel);
if (dsi_display_set_ulp_load(display, true) < 0)
DSI_WARN("failed to set load for lp2 state\n");
break;
case SDE_MODE_DPMS_ON:
if (display->panel->power_mode == SDE_MODE_DPMS_LP2) {
if (dsi_display_set_ulp_load(display, false) < 0)
DSI_WARN("failed to set load for on state\n");
}
if ((display->panel->power_mode == SDE_MODE_DPMS_LP1) ||
(display->panel->power_mode == SDE_MODE_DPMS_LP2))
rc = dsi_panel_set_nolp(display->panel);
@ -1562,7 +1573,7 @@ static ssize_t debugfs_esd_trigger_check(struct file *file,
display->trusted_vm_env);
if (rc) {
DSI_ERR("Failed to trigger ESD attack\n");
goto error;
goto unlock;
}
}
@ -2647,6 +2658,38 @@ error:
return rc;
}
#ifdef CONFIG_DEEPSLEEP
static int dsi_display_unset_clk_src(struct dsi_display *display)
{
int rc = 0;
int i;
struct dsi_display_ctrl *ctrl;
DSI_DEBUG("[%s] unset source clocks\n", display->name);
display_for_each_ctrl(i, display) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl)
continue;
/* set ctrl clocks to xo source */
rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
&display->clock_info.xo_clks);
if (rc) {
DSI_ERR("[%s] failed to set source clocks, rc=%d\n",
display->name, rc);
return rc;
}
}
return 0;
}
#else
static inline int dsi_display_unset_clk_src(struct dsi_display *display)
{
return 0;
}
#endif
static int dsi_display_set_clk_src(struct dsi_display *display)
{
int rc = 0;
@ -3432,6 +3475,7 @@ static int dsi_display_clocks_init(struct dsi_display *display)
{
int i, rc = 0, num_clk = 0;
const char *clk_name;
const char *xo_byte = "xo_byte", *xo_pixel = "xo_pixel";
const char *src_byte = "src_byte", *src_pixel = "src_pixel";
const char *mux_byte = "mux_byte", *mux_pixel = "mux_pixel";
const char *cphy_byte = "cphy_byte", *cphy_pixel = "cphy_pixel";
@ -3439,6 +3483,7 @@ static int dsi_display_clocks_init(struct dsi_display *display)
const char *shadow_cphybyte = "shadow_cphybyte",
*shadow_cphypixel = "shadow_cphypixel";
struct clk *dsi_clk;
struct dsi_clk_link_set *xo = &display->clock_info.xo_clks;
struct dsi_clk_link_set *src = &display->clock_info.src_clks;
struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
struct dsi_clk_link_set *cphy = &display->clock_info.cphy_clks;
@ -3469,6 +3514,15 @@ static int dsi_display_clocks_init(struct dsi_display *display)
DSI_ERR("failed to get %s, rc=%d\n", clk_name, rc);
if (dsi_display_check_prefix(xo_byte, clk_name)) {
xo->byte_clk = NULL;
goto error;
}
if (dsi_display_check_prefix(xo_pixel, clk_name)) {
xo->pixel_clk = NULL;
goto error;
}
if (dsi_display_check_prefix(mux_byte, clk_name)) {
mux->byte_clk = NULL;
goto error;
@ -3514,6 +3568,16 @@ static int dsi_display_clocks_init(struct dsi_display *display)
}
}
if (dsi_display_check_prefix(xo_byte, clk_name)) {
xo->byte_clk = dsi_clk;
continue;
}
if (dsi_display_check_prefix(xo_pixel, clk_name)) {
xo->pixel_clk = dsi_clk;
continue;
}
if (dsi_display_check_prefix(src_byte, clk_name)) {
src->byte_clk = dsi_clk;
continue;
@ -3904,6 +3968,35 @@ int dsi_pre_clkon_cb(void *priv,
return rc;
}
int dsi_display_set_ulp_load(struct dsi_display *display, bool enable)
{
int i, rc = 0;
struct dsi_display_ctrl *display_ctrl;
struct dsi_ctrl *ctrl;
struct dsi_panel *panel;
display_for_each_ctrl(i, display) {
display_ctrl = &display->ctrl[i];
if (!display_ctrl->ctrl)
continue;
ctrl = display_ctrl->ctrl;
rc = dsi_pwr_config_vreg_opt_mode(&ctrl->pwr_info.host_pwr, enable);
if (rc) {
DSI_ERR("failed to set ctrl load\n");
return rc;
}
}
panel = display->panel;
rc = dsi_pwr_config_vreg_opt_mode(&panel->power_info, enable);
if (rc) {
DSI_ERR("failed to set panel load\n");
return rc;
}
return rc;
}
static void __set_lane_map_v2(u8 *lane_map_v2,
enum dsi_phy_data_lanes lane0,
enum dsi_phy_data_lanes lane1,
@ -6072,6 +6165,12 @@ int dsi_display_drm_bridge_init(struct dsi_display *display,
display->bridge = bridge;
priv->bridges[priv->num_bridges++] = &bridge->base;
if (display->tx_cmd_buf == NULL) {
rc = dsi_host_alloc_cmd_tx_buffer(display);
if (rc)
DSI_ERR("failed to allocate cmd tx buffer memory\n");
}
error:
mutex_unlock(&display->display_lock);
return rc;
@ -6873,6 +6972,20 @@ int dsi_display_get_modes(struct dsi_display *display,
goto error;
}
/*
* Update the host_config.dst_format for compressed RGB101010
* pixel format.
*/
if (display->panel->host_config.dst_format ==
DSI_PIXEL_FORMAT_RGB101010 &&
display_mode.timing.dsc_enabled) {
display->panel->host_config.dst_format =
DSI_PIXEL_FORMAT_RGB888;
DSI_DEBUG("updated dst_format from %d to %d\n",
DSI_PIXEL_FORMAT_RGB101010,
display->panel->host_config.dst_format);
}
is_cmd_mode = (display_mode.panel_mode == DSI_OP_CMD_MODE);
/* Setup widebus support */
@ -8541,6 +8654,12 @@ int dsi_display_unprepare(struct dsi_display *display)
DSI_ERR("[%s] failed to disable Link clocks, rc=%d\n",
display->name, rc);
/* set to dsi clocks to xo clocks */
rc = dsi_display_unset_clk_src(display);
if (rc)
DSI_ERR("[%s] failed to unset clocks, rc=%d\n",
display->name, rc);
rc = dsi_display_ctrl_deinit(display);
if (rc)
DSI_ERR("[%s] failed to deinit controller, rc=%d\n",

View File

@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@ -110,6 +111,7 @@ struct dsi_display_boot_param {
* @shadow_cphy_clks: Used for C-phy clock switch.
*/
struct dsi_display_clk_info {
struct dsi_clk_link_set xo_clks;
struct dsi_clk_link_set src_clks;
struct dsi_clk_link_set mux_clks;
struct dsi_clk_link_set cphy_clks;
@ -628,7 +630,7 @@ int dsi_pre_clkon_cb(void *priv, enum dsi_clk_type clk_type,
* Return: error code.
*/
int dsi_display_unprepare(struct dsi_display *display);
int dsi_display_set_ulp_load(struct dsi_display *display, bool enable);
int dsi_display_set_tpg_state(struct dsi_display *display, bool enable);
int dsi_display_clock_gate(struct dsi_display *display, bool enable);

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@ -378,6 +379,10 @@ static int dsi_panel_power_off(struct dsi_panel *panel)
{
int rc = 0;
if (panel->is_twm_en) {
DSI_DEBUG("TWM Enabled, skip panel power off\n");
return rc;
}
if (gpio_is_valid(panel->reset_config.disp_en_gpio))
gpio_set_value(panel->reset_config.disp_en_gpio, 0);
@ -929,6 +934,18 @@ static int dsi_panel_parse_pixel_format(struct dsi_host_common_cfg *host,
case 18:
fmt = DSI_PIXEL_FORMAT_RGB666;
break;
case 30:
/*
* The destination pixel format (host->dst_format) depends
* upon the compression, and should be RGB888 if the DSC is
* enable.
* The DSC status information is inside the timing modes, that
* is parsed during first dsi_display_get_modes() call.
* The dst_format will be updated there depending upon the
* DSC status.
*/
fmt = DSI_PIXEL_FORMAT_RGB101010;
break;
case 24:
default:
fmt = DSI_PIXEL_FORMAT_RGB888;
@ -4305,6 +4322,10 @@ int dsi_panel_set_nolp(struct dsi_panel *panel)
return -EINVAL;
}
if (panel->is_twm_en) {
DSI_DEBUG("TWM Enabled, skip idle off\n");
return rc;
}
mutex_lock(&panel->panel_lock);
if (!panel->panel_initialized)
goto exit;
@ -4717,6 +4738,10 @@ int dsi_panel_disable(struct dsi_panel *panel)
return -EINVAL;
}
if (panel->is_twm_en) {
DSI_DEBUG("TWM Enabled, skip panel disable\n");
return rc;
}
mutex_lock(&panel->panel_lock);
/* Avoid sending panel off commands when ESD recovery is underway */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2020, 2021 The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PANEL_H_
@ -244,6 +244,7 @@ struct dsi_panel {
bool reset_gpio_always_on;
atomic_t esd_recovery_pending;
bool is_twm_en;
bool panel_initialized;
bool te_using_watchdog_timer;
struct dsi_qsync_capabilities qsync_caps;

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PARSER_H_
@ -204,7 +204,7 @@ static inline struct dsi_parser_utils *dsi_parser_get_of_utils(void)
.get_child_count = of_get_child_count,
.get_available_child_count = of_get_available_child_count,
.find_property = of_find_property,
.get_next_child = of_get_next_child,
.get_next_child = of_get_next_available_child,
.count_u32_elems = of_property_count_u32_elems,
.get_named_gpio = of_get_named_gpio,
};

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of_device.h>
@ -356,6 +357,8 @@ static int dsi_phy_settings_init(struct platform_device *pdev,
phy->allow_phy_power_off = of_property_read_bool(pdev->dev.of_node,
"qcom,panel-allow-phy-poweroff");
phy->hw.clamp_enable = of_property_read_bool(pdev->dev.of_node,
"qcom,phy-clamp-enable");
of_property_read_u32(pdev->dev.of_node,
"qcom,dsi-phy-regulator-min-datarate-bps",

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DSI_PHY_HW_H_
@ -354,6 +355,7 @@ struct dsi_phy_hw_ops {
* @length: Length of the DSI dynamic refresh register base map.
* @index: Instance ID of the controller.
* @version: DSI PHY version.
* @clamp_enable True if phy clamp is enabled
* @phy_clamp_base: Base address of phy clamp register map.
* @feature_map: Features supported by DSI PHY.
* @ops: Function pointer to PHY operations.
@ -366,6 +368,7 @@ struct dsi_phy_hw {
u32 index;
enum dsi_phy_version version;
bool clamp_enable;
void __iomem *phy_clamp_base;
DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/math64.h>
@ -858,3 +859,11 @@ void dsi_phy_hw_v4_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable)
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
wmb(); /* make sure request is set */
}
void dsi_phy_hw_v4_0_phy_idle_off(struct dsi_phy_hw *phy)
{
if (phy->version >= DSI_PHY_VERSION_4_2 && phy->clamp_enable) {
DSI_W32(phy, DSIPHY_CMN_CTRL_4, 0x1);
DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x0);
}
}

View File

@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#include "dsi_phy_timing_calc.h"
static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
16, 18, 18, 24, 3, 8, 12 };
16, 18, 18, 24, 3, 8, 12, 30 };
static int dsi_phy_cmn_validate_and_set(struct timing_entry *t,
char const *t_name)

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2020,2021 The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
@ -65,6 +65,14 @@ static int dsi_pwr_parse_supply_node(struct dsi_parser_utils *utils,
regs->vregs[i].disable_load = tmp;
/* Optional values */
rc = utils->read_u32(node, "qcom,supply-ulp-load", &tmp);
if (rc) {
DSI_DEBUG("ulp-load not specified\n");
rc = 0;
}
regs->vregs[i].ulp_load = (!rc ? tmp :
regs->vregs[i].enable_load);
rc = utils->read_u32(node, "qcom,supply-off-min-voltage", &tmp);
if (rc) {
DSI_DEBUG("off-min-voltage not specified\n");
@ -118,6 +126,30 @@ error:
return rc;
}
int dsi_pwr_config_vreg_opt_mode(struct dsi_regulator_info *regs,
bool enable)
{
int i = 0, rc = 0;
struct dsi_vreg *vreg;
u32 mode;
for (i = 0; i < regs->count; i++) {
vreg = &regs->vregs[i];
mode = enable ? vreg->ulp_load : vreg->enable_load;
DSI_DEBUG(" Setting optimum mode for %s load = %d\n",
vreg->vreg_name, mode);
rc = regulator_set_load(vreg->vreg, mode);
if (rc < 0) {
DSI_ERR("Set opt mode failed for %s",
vreg->vreg_name);
return rc;
}
}
return rc;
}
/**
* dsi_pwr_enable_vregs() - enable/disable regulators
*/

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2019,2021 The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PWR_H_
@ -21,6 +21,7 @@ struct dsi_parser_utils;
* @max_voltage: Maximum voltage in uV.
* @enable_load: Load, in uA, when enabled.
* @disable_load: Load, in uA, when disabled.
* @ulp_load: Load, in uA, when ulp is enabled.
* @off_min_voltage: Minimum voltage in uV when regulator is disabled.
* @pre_on_sleep: Sleep, in ms, before enabling the regulator.
* @post_on_sleep: Sleep, in ms, after enabling the regulator.
@ -34,6 +35,7 @@ struct dsi_vreg {
u32 max_voltage;
u32 enable_load;
u32 disable_load;
u32 ulp_load;
u32 off_min_voltage;
u32 pre_on_sleep;
u32 post_on_sleep;
@ -103,4 +105,13 @@ int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable);
int dsi_pwr_panel_regulator_mode_set(struct dsi_regulator_info *regs,
const char *reg_name,
int regulator_mode);
/**
* dsi_pwr_config_vreg_opt_mode()
* set regulator load
* @regs: Pointer to set of regulators to enable or disable.
* @enable: enable ulp_load or disable
* return: error code in case of failure or 0 for success.
*/
int dsi_pwr_config_vreg_opt_mode(struct dsi_regulator_info *regs, bool enable);
#endif /* _DSI_PWR_H_ */

View File

@ -55,7 +55,7 @@ static inline bool _msm_seamless_for_crtc(struct drm_device *dev,
if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
return true;
if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
if (!crtc_state->mode_changed && crtc_state->connectors_changed && crtc_state->active) {
for_each_old_connector_in_state(state, connector,
conn_state, i) {
if ((conn_state->crtc == crtc_state->crtc) ||
@ -282,6 +282,9 @@ msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
if (!new_crtc_state->active)
continue;
if (!new_crtc_state->mode_changed &&
new_crtc_state->connectors_changed) {
if (_msm_seamless_for_conn(connector,

View File

@ -1488,7 +1488,7 @@ void msm_mode_object_event_notify(struct drm_mode_object *obj,
static int msm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct drm_file *file_priv;
struct drm_minor *minor;
struct drm_device *dev;
struct msm_drm_private *priv;
@ -1500,6 +1500,7 @@ static int msm_release(struct inode *inode, struct file *filp)
mutex_lock(&msm_release_lock);
file_priv = filp->private_data;
if (!file_priv) {
ret = -EINVAL;
goto end;

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@ -1128,6 +1129,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
int msm_fb_obj_get_attrs(struct drm_gem_object *obj, int *fb_ns,
int *fb_sec, int *fb_sec_dir, unsigned long *flags);
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@ -16,6 +17,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@ -378,3 +381,32 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
return fb;
}
int msm_fb_obj_get_attrs(struct drm_gem_object *obj, int *fb_ns,
int *fb_sec, int *fb_sec_dir, unsigned long *flags)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
if (!obj->import_attach) {
DRM_DEBUG("NULL attachment in drm gem object flags:0x%x\n", msm_obj->flags);
return -EINVAL;
}
ret = dma_buf_get_flags(obj->import_attach->dmabuf, flags);
if (ret) {
DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
return ret;
}
if (!(*flags & ION_FLAG_SECURE))
*fb_ns = 1;
else if (*flags & ION_FLAG_CP_PIXEL)
*fb_sec = 1;
else if (*flags & (ION_FLAG_CP_SEC_DISPLAY |
ION_FLAG_CP_CAMERA_PREVIEW))
*fb_sec_dir = 1;
return ret;
}

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@ -4476,7 +4477,7 @@ void sde_cp_crtc_enable(struct drm_crtc *drm_crtc)
if (!num_mixers)
return;
mutex_lock(&crtc->crtc_cp_lock);
info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
info = vzalloc(sizeof(struct sde_kms_info));
if (info) {
for (i = 0; i < ARRAY_SIZE(dspp_cap_update_func); i++)
dspp_cap_update_func[i](crtc, info);
@ -4485,7 +4486,7 @@ void sde_cp_crtc_enable(struct drm_crtc *drm_crtc)
info->data, SDE_KMS_INFO_DATALEN(info),
CRTC_PROP_DSPP_INFO);
}
kfree(info);
vfree(info);
mutex_unlock(&crtc->crtc_cp_lock);
}
@ -4500,12 +4501,12 @@ void sde_cp_crtc_disable(struct drm_crtc *drm_crtc)
}
crtc = to_sde_crtc(drm_crtc);
mutex_lock(&crtc->crtc_cp_lock);
info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
info = vzalloc(sizeof(struct sde_kms_info));
if (info)
msm_property_set_blob(&crtc->property_info,
&crtc->dspp_blob_info,
info->data, SDE_KMS_INFO_DATALEN(info),
CRTC_PROP_DSPP_INFO);
mutex_unlock(&crtc->crtc_cp_lock);
kfree(info);
vfree(info);
}

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@ -784,10 +785,6 @@ static int _sde_connector_update_dirty_properties(
_sde_connector_update_power_locked(c_conn);
mutex_unlock(&c_conn->lock);
break;
case CONNECTOR_PROP_BL_SCALE:
case CONNECTOR_PROP_SV_BL_SCALE:
_sde_connector_update_bl_scale(c_conn);
break;
case CONNECTOR_PROP_HDR_METADATA:
_sde_connector_update_hdr_metadata(c_conn, c_state);
break;
@ -1026,6 +1023,9 @@ void sde_connector_destroy(struct drm_connector *connector)
c_conn = to_sde_connector(connector);
if (c_conn->sysfs_dev)
device_unregister(c_conn->sysfs_dev);
/* cancel if any pending esd work */
sde_connector_schedule_status_work(connector, false);
@ -1833,7 +1833,7 @@ static int _sde_connector_lm_preference(struct sde_connector *sde_conn,
return -EINVAL;
}
sde_hw_mixer_set_preference(sde_kms->catalog, num_lm, disp_type);
sde_conn->lm_mask = sde_hw_mixer_set_preference(sde_kms->catalog, num_lm, disp_type);
return ret;
}
@ -2634,7 +2634,7 @@ int sde_connector_set_blob_data(struct drm_connector *conn,
return -EINVAL;
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
info = vzalloc(sizeof(*info));
if (!info)
return -ENOMEM;
@ -2692,7 +2692,7 @@ int sde_connector_set_blob_data(struct drm_connector *conn,
SDE_KMS_INFO_DATALEN(info),
prop_id);
exit:
kfree(info);
vfree(info);
return rc;
}
@ -2845,6 +2845,104 @@ static int _sde_connector_install_properties(struct drm_device *dev,
return 0;
}
/*
 * panel_power_state_show - sysfs read handler for "panel_power_state".
 *
 * Reports the connector's last recorded panel power mode as a decimal
 * integer followed by a newline.
 */
static ssize_t panel_power_state_show(struct device *device,
		struct device_attribute *attr, char *buf)
{
	struct drm_connector *conn = dev_get_drvdata(device);
	struct sde_connector *sde_conn = to_sde_connector(conn);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			sde_conn->last_panel_power_mode);
}
/*
 * twm_enable_store - sysfs write handler for the "twm_enable" node.
 *
 * Parses a base-10 integer from @buf. A non-zero value enables TWM on
 * both the connector and its DSI panel and suppresses further backlight
 * updates; zero disables TWM and re-allows backlight updates.
 *
 * Returns @count on success, -EINVAL on a parse failure or when the
 * display/panel needed to apply the setting is not available.
 */
static ssize_t twm_enable_store(struct device *device,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct drm_connector *conn;
	struct sde_connector *sde_conn;
	struct dsi_display *dsi_display;
	int rc;
	int data;

	conn = dev_get_drvdata(device);
	sde_conn = to_sde_connector(conn);
	dsi_display = (struct dsi_display *) sde_conn->display;

	/*
	 * Fix: guard the panel dereference below; the original wrote
	 * dsi_display->panel->is_twm_en without any NULL check, which
	 * oopses if the display is not (yet) bound when userspace writes
	 * to this node.
	 */
	if (!dsi_display || !dsi_display->panel) {
		SDE_ERROR("invalid dsi display/panel\n");
		return -EINVAL;
	}

	rc = kstrtoint(buf, 10, &data);
	if (rc) {
		SDE_ERROR("kstrtoint failed, rc = %d\n", rc);
		return -EINVAL;
	}

	sde_conn->twm_en = !!data;
	dsi_display->panel->is_twm_en = sde_conn->twm_en;
	/* backlight updates are blocked while TWM is active */
	sde_conn->allow_bl_update = !data;
	SDE_DEBUG("TWM: %s\n", sde_conn->twm_en ? "ENABLED" : "DISABLED");
	return count;
}
/*
 * twm_enable_show - sysfs read handler for "twm_enable".
 *
 * Prints the current TWM enable flag (0 or 1) followed by a newline.
 */
static ssize_t twm_enable_show(struct device *device,
		struct device_attribute *attr, char *buf)
{
	struct drm_connector *conn = dev_get_drvdata(device);
	struct sde_connector *sde_conn = to_sde_connector(conn);

	SDE_DEBUG("TWM: %s\n", sde_conn->twm_en ? "ENABLED" : "DISABLED");

	return scnprintf(buf, PAGE_SIZE, "%d\n", sde_conn->twm_en);
}
static DEVICE_ATTR_RO(panel_power_state);
static DEVICE_ATTR_RW(twm_enable);
static struct attribute *sde_connector_dev_attrs[] = {
&dev_attr_panel_power_state.attr,
&dev_attr_twm_enable.attr,
NULL
};
static const struct attribute_group sde_connector_attr_group = {
.attrs = sde_connector_dev_attrs,
};
static const struct attribute_group *sde_connector_attr_groups[] = {
&sde_connector_attr_group,
NULL,
};
/*
 * sde_connector_post_init - post-initialization for a connector: creates
 * the connector's sysfs device node with the twm_enable and
 * panel_power_state attribute groups.
 * @dev:  Pointer to drm device struct
 * @conn: Pointer to drm connector
 *
 * Only DSI connectors get a sysfs node; other connector types return
 * success without doing anything.
 *
 * Returns: Zero on success, negative error code on failure.
 */
int sde_connector_post_init(struct drm_device *dev, struct drm_connector *conn)
{
	struct sde_connector *c_conn;
	int rc = 0;

	if (!dev || !dev->primary || !dev->primary->kdev || !conn) {
		SDE_ERROR("invalid input param(s)\n");
		return -EINVAL;
	}

	c_conn = to_sde_connector(conn);
	if (conn->connector_type != DRM_MODE_CONNECTOR_DSI)
		return rc;

	c_conn->sysfs_dev =
		device_create_with_groups(dev->primary->kdev->class,
			dev->primary->kdev, 0, conn, sde_connector_attr_groups,
			"sde-conn-%d-%s", conn->index, conn->name);
	if (IS_ERR_OR_NULL(c_conn->sysfs_dev)) {
		/*
		 * Fix: log the index value itself with %u; the original
		 * passed &c_conn->base.index (a pointer) to a %d specifier,
		 * printing the address instead of the connector index.
		 */
		SDE_ERROR("connector:%u sysfs create failed rc:%ld\n",
			c_conn->base.index, PTR_ERR(c_conn->sysfs_dev));
		if (!c_conn->sysfs_dev)
			rc = -EINVAL;
		else
			rc = PTR_ERR(c_conn->sysfs_dev);
	}

	return rc;
}
struct drm_connector *sde_connector_init(struct drm_device *dev,
struct drm_encoder *encoder,
struct drm_panel *panel,
@ -2896,6 +2994,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
c_conn->dpms_mode = DRM_MODE_DPMS_ON;
c_conn->lp_mode = 0;
c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
c_conn->twm_en = false;
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->vbif[VBIF_NRT]) {

View File

@ -470,6 +470,7 @@ struct sde_connector_dyn_hdr_metadata {
* @esd_status_interval: variable to change ESD check interval in millisec
* @panel_dead: Flag to indicate if panel has gone bad
* @esd_status_check: Flag to indicate if ESD thread is scheduled or not
* @twm_en: Flag to indicate if TWM mode is enabled or not
* @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
* @bl_scale: BL scale value for ABA feature
* @bl_scale_sv: BL scale value for sunlight visibility feature
@ -511,6 +512,7 @@ struct sde_connector {
int dpms_mode;
int lp_mode;
int last_panel_power_mode;
struct device *sysfs_dev;
struct msm_property_info property_info;
struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
@ -532,6 +534,7 @@ struct sde_connector {
u32 esd_status_interval;
bool panel_dead;
bool esd_status_check;
bool twm_en;
bool bl_scale_dirty;
u32 bl_scale;
@ -547,6 +550,7 @@ struct sde_connector {
bool hdr_supported;
u32 color_enc_fmt;
u32 lm_mask;
u8 hdr_plus_app_ver;
u32 qsync_mode;
@ -752,6 +756,15 @@ int sde_connector_set_property_for_commit(struct drm_connector *connector,
struct drm_atomic_state *atomic_state,
uint32_t property_idx, uint64_t value);
/**
* sde_connector_post_init - update connector object with post initialization.
* It can update the debugfs, sysfs, entries
* @dev: Pointer to drm device struct
* @conn: Pointer to drm connector
* Returns: Zero on success
*/
int sde_connector_post_init(struct drm_device *dev, struct drm_connector *conn);
/**
* sde_connector_init - create drm connector object for a given display
* @dev: Pointer to drm device struct

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@ -494,7 +494,7 @@ static void _sde_core_uidle_setup_cfg(struct sde_kms *kms,
uidle->ops.set_uidle_ctl(uidle, &cfg);
}
static void _sde_core_uidle_setup_ctl(struct drm_crtc *crtc,
void sde_core_uidle_setup_ctl(struct drm_crtc *crtc,
bool enable)
{
struct drm_encoder *drm_enc;
@ -522,7 +522,7 @@ static int _sde_core_perf_enable_uidle(struct sde_kms *kms,
SDE_EVT32(enable);
_sde_core_uidle_setup_wd(kms, enable);
_sde_core_uidle_setup_cfg(kms, enable);
_sde_core_uidle_setup_ctl(crtc, enable);
sde_core_uidle_setup_ctl(crtc, true);
kms->perf.uidle_enabled = enable;
@ -577,7 +577,7 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
struct drm_crtc *tmp_crtc;
struct sde_kms *kms;
bool disable_uidle = false;
u32 fps;
u32 fps, num_crtc = 0;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@ -603,6 +603,7 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
num_crtc++;
/*
* If DFPS is enabled with VFP, SDE clock and
* transfer time will get fixed at max FPS
@ -620,7 +621,7 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
_sde_core_perf_is_cwb(tmp_crtc),
disable_uidle, enable);
if (_sde_core_perf_is_wb(tmp_crtc) ||
if ((num_crtc > 1) || _sde_core_perf_is_wb(tmp_crtc) ||
_sde_core_perf_is_cwb(tmp_crtc) || (!fps ||
fps > kms->perf.catalog->uidle_cfg.max_fps)) {
disable_uidle = true;
@ -632,6 +633,8 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
_sde_core_perf_enable_uidle(kms, crtc,
(enable && !disable_uidle) ? true : false);
kms->perf.catalog->uidle_cfg.dirty = !enable;
/* If perf counters enabled, set them up now */
if (kms->catalog->uidle_cfg.debugfs_perf)
_sde_core_perf_uidle_setup_cntr(kms, enable);

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_CORE_PERF_H_
@ -138,6 +138,13 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc);
*/
void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc, bool enable);
/**
* sde_core_uidle_setup_ctl - enable uidle DB control
* @crtc: Pointer to crtc
* @enable: enable/disable uidle DB
*/
void sde_core_uidle_setup_ctl(struct drm_crtc *crtc, bool enable);
/**
* sde_core_perf_destroy - destroy the given core performance context
* @perf: Pointer to core performance context

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@ -3303,6 +3304,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
sde_encoder_trigger_rsc_state_change(encoder);
/* encoder will trigger pending mask now */
sde_encoder_trigger_kickoff_pending(encoder);
}
@ -3321,7 +3323,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
_sde_crtc_blend_setup(crtc, old_state, true);
_sde_crtc_dest_scaler_setup(crtc);
if (crtc->state->mode_changed)
if (crtc->state->mode_changed || sde_kms->perf.catalog->uidle_cfg.dirty)
sde_core_perf_crtc_update_uidle(crtc, true);
/*
@ -3476,6 +3478,7 @@ static void sde_crtc_destroy_state(struct drm_crtc *crtc,
struct sde_crtc_state *cstate;
struct drm_encoder *enc;
struct sde_kms *sde_kms;
u32 encoder_mask;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
@ -3491,9 +3494,11 @@ static void sde_crtc_destroy_state(struct drm_crtc *crtc,
return;
}
SDE_DEBUG("crtc%d\n", crtc->base.id);
encoder_mask = state->encoder_mask ? state->encoder_mask :
crtc->state->encoder_mask;
SDE_DEBUG("crtc%d\n, encoder_mask=%d", crtc->base.id, encoder_mask);
drm_for_each_encoder_mask(enc, crtc->dev, state->encoder_mask)
drm_for_each_encoder_mask(enc, crtc->dev, encoder_mask)
sde_rm_release(&sde_kms->rm, enc, true);
__drm_atomic_helper_crtc_destroy_state(state);
@ -5410,7 +5415,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
return;
}
info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
info = vzalloc(sizeof(struct sde_kms_info));
if (!info) {
SDE_ERROR("failed to allocate info memory\n");
return;
@ -5494,7 +5499,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
info->data, SDE_KMS_INFO_DATALEN(info),
CRTC_PROP_INFO);
kfree(info);
vfree(info);
}
static int _sde_crtc_get_output_fence(struct drm_crtc *crtc,

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@ -141,7 +142,6 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->hw_ctl && phys->hw_ctl->ops.uidle_enable) {
SDE_EVT32(DRMID(drm_enc), enable);
phys->hw_ctl->ops.uidle_enable(phys->hw_ctl, enable);
}
}
@ -267,13 +267,11 @@ static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
return rc;
}
bool sde_encoder_is_primary_display(struct drm_encoder *drm_enc)
u32 sde_encoder_get_display_type(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
return sde_enc &&
(sde_enc->disp_info.display_type ==
SDE_CONNECTOR_PRIMARY);
return sde_enc ? sde_enc->disp_info.display_type : 0;
}
bool sde_encoder_is_dsi_display(struct drm_encoder *drm_enc)
@ -920,9 +918,10 @@ static int _sde_encoder_atomic_check_reserve(struct drm_encoder *drm_enc,
}
/* Skip RM allocation for Primary during CWB usecase */
if (!crtc_state->mode_changed && !crtc_state->active_changed &&
if ((!crtc_state->mode_changed && !crtc_state->active_changed &&
crtc_state->connectors_changed && (conn_state->crtc ==
conn_state->connector->state->crtc))
conn_state->connector->state->crtc)) ||
(crtc_state->active_changed && !crtc_state->active))
goto skip_reserve;
/* Reserve dynamic resources, indicating atomic_check phase */
@ -1378,20 +1377,9 @@ static int _sde_encoder_update_rsc_client(
(rsc_config->prefill_lines != mode_info->prefill_lines) ||
(rsc_config->jitter_numer != mode_info->jitter_numer) ||
(rsc_config->jitter_denom != mode_info->jitter_denom)) {
rsc_config->fps = mode_info->frame_rate;
rsc_config->vtotal = mode_info->vtotal;
/*
* for video mode, prefill lines should not go beyond vertical
* front porch for RSCC configuration. This will ensure bw
* downvotes are not sent within the active region. Additional
* -1 is to give one line time for rscc mode min_threshold.
*/
if (is_vid_mode && (mode_info->prefill_lines >= v_front_porch))
rsc_config->prefill_lines = v_front_porch - 1;
else
rsc_config->prefill_lines = mode_info->prefill_lines;
rsc_config->prefill_lines = mode_info->prefill_lines;
rsc_config->jitter_numer = mode_info->jitter_numer;
rsc_config->jitter_denom = mode_info->jitter_denom;
sde_enc->rsc_state_init = false;
@ -1843,9 +1831,6 @@ static int _sde_encoder_rc_pre_modeset(struct drm_encoder *drm_enc,
{
int ret = 0;
/* cancel delayed off work, if any */
_sde_encoder_rc_cancel_delayed(sde_enc, sw_event);
mutex_lock(&sde_enc->rc_lock);
if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
@ -1875,19 +1860,18 @@ static int _sde_encoder_rc_pre_modeset(struct drm_encoder *drm_enc,
sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
}
if (sde_encoder_has_dsc_hw_rev_2(sde_enc))
goto skip_wait;
ret = sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
if (ret && ret != -EWOULDBLOCK) {
SDE_ERROR_ENC(sde_enc,
"wait for commit done returned %d\n",
ret);
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
ret, SDE_EVTLOG_ERROR);
SDE_ERROR_ENC(sde_enc, "wait for commit done returned %d\n", ret);
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state, ret, SDE_EVTLOG_ERROR);
ret = -EINVAL;
goto end;
}
sde_encoder_irq_control(drm_enc, false);
skip_wait:
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
SDE_ENC_RC_STATE_MODESET, SDE_EVTLOG_FUNC_CASE5);
@ -1922,8 +1906,6 @@ static int _sde_encoder_rc_post_modeset(struct drm_encoder *drm_enc,
goto end;
}
sde_encoder_irq_control(drm_enc, true);
_sde_encoder_update_rsc_client(drm_enc, true);
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
@ -2375,6 +2357,9 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
sde_connector_state_get_mode_info(conn->state, &sde_enc->mode_info);
sde_encoder_dce_set_bpp(sde_enc->mode_info, sde_enc->crtc);
/* cancel delayed off work, if any */
kthread_cancel_delayed_work_sync(&sde_enc->delayed_off_work);
/* release resources before seamless mode change */
ret = sde_encoder_virt_modeset_rc(drm_enc, adj_mode, true);
if (ret)
@ -2612,7 +2597,6 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
&sde_enc->cur_master->intf_cfg_v1);
_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
sde_encoder_control_te(drm_enc, true);
memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
@ -2715,6 +2699,7 @@ void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
sde_enc->cur_master->ops.restore(sde_enc->cur_master);
_sde_encoder_virt_enable_helper(drm_enc);
sde_encoder_control_te(drm_enc, true);
}
static void sde_encoder_off_work(struct kthread_work *work)
@ -2737,6 +2722,7 @@ static void sde_encoder_off_work(struct kthread_work *work)
static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
bool has_master_enc = false;
int i, ret = 0;
struct msm_compression_info *comp_info = NULL;
struct drm_display_mode *cur_mode = NULL;
@ -2763,18 +2749,19 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
SDE_DEBUG_ENC(sde_enc, "\n");
SDE_EVT32(DRMID(drm_enc), cur_mode->hdisplay, cur_mode->vdisplay);
sde_enc->cur_master = NULL;
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
SDE_DEBUG_ENC(sde_enc, "master is now idx %d\n", i);
sde_enc->cur_master = phys;
has_master_enc = true;
break;
}
}
if (!sde_enc->cur_master) {
if (!has_master_enc) {
sde_enc->cur_master = NULL;
SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
return;
}
@ -2796,6 +2783,9 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
/* turn off vsync_in to update tear check configuration */
sde_encoder_control_te(drm_enc, false);
memset(&sde_enc->cur_master->intf_cfg_v1, 0,
sizeof(sde_enc->cur_master->intf_cfg_v1));
@ -2851,6 +2841,7 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
sde_enc->cur_master->ops.enable(sde_enc->cur_master);
_sde_encoder_virt_enable_helper(drm_enc);
sde_encoder_control_te(drm_enc, true);
}
void sde_encoder_virt_reset(struct drm_encoder *drm_enc)
@ -3806,6 +3797,46 @@ bool sde_encoder_check_curr_mode(struct drm_encoder *drm_enc, u32 mode)
return (disp_info->curr_panel_mode == mode);
}
/*
 * sde_encoder_trigger_rsc_state_change - move an idle secondary command-mode
 * encoder back to the ON resource-control state before kickoff.
 *
 * Called from the commit path (before trigger_kickoff_pending) so that, in a
 * dual-display case, a secondary display coming out of idle has its clocks
 * and resources re-enabled and its RSC client updated before CTL_PREPARE is
 * set. Holds rc_lock for the whole state transition.
 *
 * No-op unless ALL of the following hold: a current master exists, the
 * encoder is in MSM_DISPLAY_CMD_MODE, the display is not primary, and the
 * rc_state is SDE_ENC_RC_STATE_IDLE.
 */
void sde_encoder_trigger_rsc_state_change(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
int ret = 0;
sde_enc = to_sde_encoder_virt(drm_enc);
if (!sde_enc)
return;
mutex_lock(&sde_enc->rc_lock);
/*
* In dual display case when secondary comes out of
* idle make sure RSC solver mode is disabled before
* setting CTL_PREPARE.
*/
if (!sde_enc->cur_master ||
!sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE) ||
sde_enc->disp_info.display_type == SDE_CONNECTOR_PRIMARY ||
sde_enc->rc_state != SDE_ENC_RC_STATE_IDLE)
goto end;
/* enable all the clks and resources */
ret = _sde_encoder_resource_control_helper(drm_enc, true);
if (ret) {
/* on failure stay in IDLE state; next attempt will retry */
SDE_ERROR_ENC(sde_enc, "rc in state %d\n", sde_enc->rc_state);
SDE_EVT32(DRMID(drm_enc), sde_enc->rc_state, SDE_EVTLOG_ERROR);
goto end;
}
/* re-vote with RSC and mark the encoder active */
_sde_encoder_update_rsc_client(drm_enc, true);
SDE_EVT32(DRMID(drm_enc), sde_enc->rc_state, SDE_ENC_RC_STATE_ON);
sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
end:
mutex_unlock(&sde_enc->rc_lock);
}
void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc;

View File

@ -275,6 +275,12 @@ void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
struct sde_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state);
/**
* sde_encoder_trigger_rsc_state_change - rsc state change.
* @encoder: encoder pointer
*/
void sde_encoder_trigger_rsc_state_change(struct drm_encoder *drm_enc);
/**
* sde_encoder_early_wakeup - early wake up display
* @encoder: encoder pointer
@ -517,12 +523,12 @@ bool sde_encoder_is_cwb_disabling(struct drm_encoder *drm_enc,
struct drm_crtc *drm_crtc);
/**
* sde_encoder_is_primary_display - checks if underlying display is primary
* display or not.
* sde_encoder_get_display_type - returns the display_type of underlying
* display
* @drm_enc: Pointer to drm encoder structure
* @Return: true if it is primary display. false if secondary display
* @Return: display_type
*/
bool sde_encoder_is_primary_display(struct drm_encoder *enc);
u32 sde_encoder_get_display_type(struct drm_encoder *enc);
/**
* sde_encoder_is_dsi_display - checks if underlying display is DSI

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
*/
@ -913,6 +914,26 @@ void sde_encoder_dce_set_bpp(struct msm_mode_info mode_info,
sde_crtc->src_bpp, sde_crtc->target_bpp);
}
/*
 * sde_encoder_has_dsc_hw_rev_2 - check whether this encoder is using a DSC
 * hardware block with the 1.2 feature revision.
 * @sde_enc: pointer to virtual encoder structure
 *
 * Returns false when the encoder is missing, when the current mode does not
 * use DSC compression, or when no DSC block is assigned. The first assigned
 * DSC block's capability bits decide the result.
 */
bool sde_encoder_has_dsc_hw_rev_2(struct sde_encoder_virt *sde_enc)
{
	int idx;

	if (!sde_enc)
		return false;

	if (sde_enc->mode_info.comp_info.comp_type !=
			MSM_DISPLAY_COMPRESSION_DSC)
		return false;

	for (idx = 0; idx < MAX_CHANNELS_PER_ENC; idx++) {
		if (!sde_enc->hw_dsc[idx])
			continue;

		return test_bit(SDE_DSC_HW_REV_1_2,
				&sde_enc->hw_dsc[idx]->caps->features);
	}

	return false;
}
void sde_encoder_dce_disable(struct sde_encoder_virt *sde_enc)
{
enum msm_display_compression_type comp_type;

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2017, 2020 The Linux Foundation. All rights reserved.
*/
@ -36,4 +37,10 @@ int sde_encoder_dce_setup(struct sde_encoder_virt *sde_enc,
*/
void sde_encoder_dce_flush(struct sde_encoder_virt *sde_enc);
/**
* sde_encoder_has_dsc_hw_rev_2 :checks if dsc_hw_rev_1_2 feature is enabled
* @sde_enc: pointer to virtual encoder structure
*/
bool sde_encoder_has_dsc_hw_rev_2(struct sde_encoder_virt *sde_enc);
#endif /* __SDE_ENCODER_DCE_H__ */

View File

@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@ -275,7 +276,6 @@ struct sde_encoder_irq {
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
* @vblank_cached_refcount: Reference count of vblank cached request
* @wbirq_refcount: Reference count of wb irq request
* @vsync_cnt: Vsync count for the physical encoder
* @underrun_cnt: Underrun count for the physical encoder
@ -325,7 +325,6 @@ struct sde_encoder_phys {
enum sde_enc_enable_state enable_state;
struct mutex *vblank_ctl_lock;
atomic_t vblank_refcount;
atomic_t vblank_cached_refcount;
atomic_t wbirq_refcount;
atomic_t vsync_cnt;
atomic_t underrun_cnt;

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@ -340,8 +341,6 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
{
struct sde_encoder_irq *irq;
struct sde_kms *sde_kms;
int ret = 0;
u32 vblank_refcount;
if (!phys_enc->sde_kms || !phys_enc->hw_pp || !phys_enc->hw_ctl) {
SDE_ERROR("invalid args %d %d %d\n", !phys_enc->sde_kms,
@ -356,39 +355,13 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
sde_kms = phys_enc->sde_kms;
mutex_lock(phys_enc->vblank_ctl_lock);
vblank_refcount = atomic_read(&phys_enc->vblank_refcount);
if (vblank_refcount) {
ret = sde_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_RDPTR);
if (ret)
SDE_ERROR(
"control vblank irq registration error %d\n",
ret);
if (vblank_refcount > 1)
SDE_ERROR(
"vblank_refcount mismatch detected, try to reset %d\n",
atomic_read(&phys_enc->vblank_refcount));
else
atomic_set(&phys_enc->vblank_cached_refcount, 1);
SDE_EVT32(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, vblank_refcount,
atomic_read(&phys_enc->vblank_cached_refcount));
}
atomic_set(&phys_enc->vblank_refcount, 0);
mutex_unlock(phys_enc->vblank_ctl_lock);
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->hw_idx = phys_enc->hw_ctl->idx;
irq->irq_idx = -EINVAL;
irq = &phys_enc->irq[INTR_IDX_PINGPONG];
irq->hw_idx = phys_enc->hw_pp->idx;
irq->irq_idx = -EINVAL;
irq = &phys_enc->irq[INTR_IDX_RDPTR];
irq->irq_idx = -EINVAL;
if (phys_enc->has_intf_te)
irq->hw_idx = phys_enc->hw_intf->idx;
else
@ -396,17 +369,14 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->hw_idx = phys_enc->intf_idx;
irq->irq_idx = -EINVAL;
irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
irq->irq_idx = -EINVAL;
if (phys_enc->has_intf_te)
irq->hw_idx = phys_enc->hw_intf->idx;
else
irq->hw_idx = phys_enc->hw_pp->idx;
irq = &phys_enc->irq[INTR_IDX_WRPTR];
irq->irq_idx = -EINVAL;
if (phys_enc->has_intf_te)
irq->hw_idx = phys_enc->hw_intf->idx;
else
@ -831,7 +801,7 @@ static int sde_encoder_phys_cmd_control_vblank_irq(
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
int ret = 0;
u32 refcount, cached_refcount;
u32 refcount;
struct sde_kms *sde_kms;
if (!phys_enc || !phys_enc->hw_pp) {
@ -846,17 +816,11 @@ static int sde_encoder_phys_cmd_control_vblank_irq(
goto end;
refcount = atomic_read(&phys_enc->vblank_refcount);
cached_refcount = atomic_read(&phys_enc->vblank_cached_refcount);
/* protect against negative */
if (!enable && refcount == 0) {
if (cached_refcount == 1) {
atomic_set(&phys_enc->vblank_cached_refcount, 0);
goto end;
} else {
ret = -EINVAL;
goto end;
}
ret = -EINVAL;
goto end;
}
SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
@ -876,11 +840,6 @@ static int sde_encoder_phys_cmd_control_vblank_irq(
atomic_inc_return(&phys_enc->vblank_refcount);
}
if (enable && cached_refcount) {
atomic_inc(&phys_enc->vblank_refcount);
atomic_set(&phys_enc->vblank_cached_refcount, 0);
}
end:
mutex_unlock(phys_enc->vblank_ctl_lock);
if (ret) {
@ -2129,7 +2088,6 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
irq->cb.func = sde_encoder_phys_cmd_wr_ptr_irq;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->vblank_cached_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
atomic_set(&cmd_enc->pending_vblank_cnt, 0);

View File

@ -1107,12 +1107,41 @@ exit:
phys_enc->enable_state = SDE_ENC_DISABLED;
}
/*
 * sde_encoder_phys_vid_poll_for_active_region - after enabling the timing
 * engine, wait for the interface line counter to leave the vertical
 * blanking (inactive) region and enter the active region.
 *
 * Only applicable when programmable fetch is in use; returns 0 immediately
 * otherwise. Polls at most MAX_POLL_CNT times, spreading the polls across
 * roughly one frame period.
 *
 * Returns: 0 once the active region is reached, -ETIMEDOUT if it is not
 * reached within MAX_POLL_CNT polls, -EINVAL on invalid arguments.
 */
static int sde_encoder_phys_vid_poll_for_active_region(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_vid *vid_enc;
	struct intf_timing_params *timing;
	u32 line_cnt, v_inactive, poll_time_us, trial = 0;

	if (!phys_enc || !phys_enc->hw_intf ||
			!phys_enc->hw_intf->ops.get_line_count)
		return -EINVAL;

	vid_enc = to_sde_encoder_phys_vid(phys_enc);
	timing = &vid_enc->timing_params;

	/* if programmable fetch is not enabled return early */
	if (!programmable_fetch_get_num_lines(vid_enc, timing))
		return 0;

	poll_time_us = DIV_ROUND_UP(1000000, timing->vrefresh) / MAX_POLL_CNT;
	v_inactive = timing->v_front_porch + timing->v_back_porch +
			timing->vsync_pulse_width;

	do {
		usleep_range(poll_time_us, poll_time_us + 5);
		line_cnt = phys_enc->hw_intf->ops.get_line_count(
				phys_enc->hw_intf);
		trial++;
	/*
	 * Fix: continue polling only while the retry budget remains AND the
	 * counter is still inside the blanking region. The original used ||,
	 * which exits only when both conditions fail and therefore spins
	 * forever if the counter never reaches v_inactive.
	 */
	} while ((trial < MAX_POLL_CNT) && (line_cnt < v_inactive));

	/* report timeout based on where the counter actually ended up */
	return (line_cnt < v_inactive) ? -ETIMEDOUT : 0;
}
static void sde_encoder_phys_vid_handle_post_kickoff(
struct sde_encoder_phys *phys_enc)
{
unsigned long lock_flags;
struct sde_encoder_phys_vid *vid_enc;
u32 avr_mode;
u32 ret;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@ -1135,6 +1164,10 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
1);
spin_unlock_irqrestore(phys_enc->enc_spinlock,
lock_flags);
ret = sde_encoder_phys_vid_poll_for_active_region(phys_enc);
if (ret)
SDE_DEBUG_VIDENC(vid_enc, "poll for active failed ret:%d\n", ret);
}
phys_enc->enable_state = SDE_ENC_ENABLED;
}

View File

@ -1727,6 +1727,9 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
}
if (phys_enc->in_clone_mode) {
if (hw_wb->ops.setup_crop)
hw_wb->ops.setup_crop(hw_wb, NULL, false);
_sde_encoder_phys_wb_setup_cwb(phys_enc, false);
_sde_encoder_phys_wb_update_cwb_flush(phys_enc, false);
phys_enc->enable_state = SDE_ENC_DISABLING;

View File

@ -43,7 +43,7 @@ signed long sde_sync_wait(void *fnc, long timeout_ms)
timeline_str, TIMELINE_VAL_LENGTH);
SDE_ERROR(
"fence driver name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
"fence driver name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
fence->seqno, timeline_str,
@ -132,7 +132,7 @@ static bool sde_fence_signaled(struct dma_fence *fence)
bool status;
status = ((int)(fence->seqno - f->ctx->done_count) <= 0);
SDE_DEBUG("status:%d fence seq:%d and timeline:%d\n",
SDE_DEBUG("status:%d fence seq:%llu and timeline:%d\n",
status, fence->seqno, f->ctx->done_count);
return status;
}
@ -153,7 +153,7 @@ static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
if (!fence || !str)
return;
snprintf(str, size, "%d", fence->seqno);
snprintf(str, size, "%llu", fence->seqno);
}
static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
@ -435,7 +435,7 @@ void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
fence->ops->timeline_value_str(fence,
timeline_str, TIMELINE_VAL_LENGTH);
seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
fence->seqno, timeline_str,

View File

@ -425,6 +425,7 @@ enum {
MIXER_BLOCKS,
MIXER_DISP,
MIXER_CWB,
MIXER_CWB_MASK,
MIXER_PROP_MAX,
};
@ -710,6 +711,7 @@ static struct sde_prop_type mixer_prop[] = {
PROP_TYPE_STRING_ARRAY},
{MIXER_CWB, "qcom,sde-mixer-cwb-pref", false,
PROP_TYPE_STRING_ARRAY},
{MIXER_CWB_MASK, "qcom,sde-mixer-cwb-mask", false, PROP_TYPE_U32_ARRAY},
};
static struct sde_prop_type mixer_blocks_prop[] = {
@ -1976,10 +1978,10 @@ void sde_hw_ctl_set_preference(struct sde_mdss_cfg *sde_cfg,
}
}
void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
u32 sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
uint32_t disp_type)
{
u32 i, cnt = 0, sec_cnt = 0;
u32 i, cnt = 0, sec_cnt = 0, lm_mask = 0;
if (disp_type == SDE_CONNECTOR_PRIMARY) {
for (i = 0; i < sde_cfg->mixer_count; i++) {
@ -1998,6 +2000,7 @@ void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
if (cnt < num_lm) {
set_bit(SDE_DISP_PRIMARY_PREF,
&sde_cfg->mixer[i].features);
lm_mask |= BIT(sde_cfg->mixer[i].id - 1);
cnt++;
}
@ -2036,10 +2039,13 @@ void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
BIT(SDE_DISP_PRIMARY_PREF))) {
set_bit(SDE_DISP_SECONDARY_PREF,
&sde_cfg->mixer[i].features);
lm_mask |= BIT(sde_cfg->mixer[i].id - 1);
cnt++;
}
}
}
return lm_mask;
}
static int sde_mixer_parse_dt(struct device_node *np,
@ -2151,6 +2157,9 @@ static int sde_mixer_parse_dt(struct device_node *np,
if (BIT(mixer->id - LM_0) & sde_cfg->cwb_virtual_mixers_mask)
set_bit(SDE_MIXER_IS_VIRTUAL, &mixer->features);
mixer->cwb_mask = !props->exists[MIXER_CWB_MASK] ? 0x0 :
PROP_VALUE_ACCESS(props->values, MIXER_CWB_MASK, i);
mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0
: PINGPONG_MAX;
mixer->dspp = dspp_count > 0 ? dspp_idx + DSPP_0

View File

@ -938,6 +938,7 @@ struct sde_uidle_cfg {
u32 debugfs_perf;
bool debugfs_ctrl;
bool perf_cntr_en;
bool dirty;
};
/* struct sde_mdp_cfg : MDP TOP-BLK instance info
@ -977,6 +978,7 @@ struct sde_sspp_cfg {
* @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
* @ds: ID of connected DS, DS_MAX if unsupported
* @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
* @cwb_mask: Bitmask of LMs connected to cwb mux from this LM id
*/
struct sde_lm_cfg {
SDE_HW_BLK_INFO;
@ -985,6 +987,7 @@ struct sde_lm_cfg {
u32 pingpong;
u32 ds;
unsigned long lm_pair_mask;
u32 cwb_mask;
};
/**
@ -1641,8 +1644,10 @@ struct sde_mdss_hw_cfg_handler {
* @sde_cfg: pointer to sspp cfg
* @num_lm: num lms to set preference
* @disp_type: is the given display primary/secondary
*
* Return: layer mixer mask allocated for the disp_type
*/
void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
u32 sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
uint32_t disp_type);
/**

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/iopoll.h>
@ -257,10 +258,11 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
data_width = p->width;
if (p->compression_en) {
data_width = DIV_ROUND_UP(p->dce_bytes_per_line, 3);
if (p->wide_bus_en)
data_width >>= 1;
data_width = DIV_ROUND_UP(p->dce_bytes_per_line, 6);
else
data_width = DIV_ROUND_UP(p->dce_bytes_per_line, 3);
} else if (!dp_intf && p->wide_bus_en) {
data_width = p->width >> 1;
} else {
@ -543,20 +545,30 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
struct sde_hw_tear_check *te)
{
struct sde_hw_blk_reg_map *c;
int cfg;
u32 cfg = 0;
spinlock_t tearcheck_spinlock;
if (!intf)
return -EINVAL;
spin_lock_init(&tearcheck_spinlock);
c = &intf->hw;
cfg = BIT(19); /* VSYNC_COUNTER_EN */
if (te->hw_vsync_mode)
cfg |= BIT(20);
cfg |= te->vsync_count;
/*
* Local spinlock is acquired here to avoid pre-emption
* as below register programming should be completed in
* less than 2^16 vsync clk cycles.
*/
spin_lock(&tearcheck_spinlock);
SDE_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
(te->start_pos + te->sync_threshold_start + 1));
SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
wmb(); /* disable vsync counter before updating single buffer registers */
SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
SDE_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
SDE_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
@ -565,8 +577,9 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
SDE_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
((te->sync_threshold_continue << 16) |
te->sync_threshold_start));
SDE_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
(te->start_pos + te->sync_threshold_start + 1));
cfg |= BIT(19); /* VSYNC_COUNTER_EN */
SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
spin_unlock(&tearcheck_spinlock);
return 0;
}

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/iopoll.h>
@ -1191,6 +1192,10 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
//Lack of block support will be caught by kick_off
memset(&hw, 0, sizeof(hw));
SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);
if (hw.hwversion == 0) {
DRM_ERROR("DMA type %d is unsupported\n", kick_off.dma_type);
return -EOPNOTSUPP;
}
SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode, ctl->idx, kick_off.queue_select,
kick_off.dma_type, kick_off.op);

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
*/
@ -161,7 +162,7 @@ void sde_hw_uidle_setup_ctl(struct sde_hw_uidle *uidle,
struct sde_uidle_ctl_cfg *cfg)
{
struct sde_hw_blk_reg_map *c = &uidle->hw;
u32 reg_val;
u32 reg_val, fal10_veto_regval = 0;
reg_val = SDE_REG_READ(c, UIDLE_CTL);
reg_val = (reg_val & ~BIT(31)) | (cfg->uidle_enable ? BIT(31) : 0);
@ -176,6 +177,9 @@ void sde_hw_uidle_setup_ctl(struct sde_hw_uidle *uidle,
FAL10_EXIT_CNT_MSK);
SDE_REG_WRITE(c, UIDLE_CTL, reg_val);
if (!cfg->uidle_enable)
fal10_veto_regval |= (BIT(31) | BIT(0));
SDE_REG_WRITE(c, UIDLE_FAL10_VETO_OVERRIDE, fal10_veto_regval);
}
static void sde_hw_uilde_active_override(struct sde_hw_uidle *uidle,

View File

@ -215,9 +215,8 @@ static void sde_hw_wb_crop(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb, bool
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 crop_xy;
crop_xy = (wb->crop.y << 16) | wb->crop.x;
if (crop) {
crop_xy = (wb->crop.y << 16) | wb->crop.x;
SDE_REG_WRITE(c, WB_CROP_CTRL, 0x1);
SDE_REG_WRITE(c, WB_CROP_OFFSET, crop_xy);
} else {

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2019, 2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@ -22,6 +23,8 @@ void sde_irq_update(struct msm_kms *msm_kms, bool enable)
SDE_ERROR("invalid kms arguments\n");
return;
}
if (sde_kms->irq_num < 0)
return;
sde_kms->irq_enabled = enable;
@ -101,7 +104,7 @@ void sde_irq_preinstall(struct msm_kms *kms)
}
/* disable irq until power event enables it */
if (!sde_kms->splash_data.num_splash_displays && !sde_kms->irq_enabled)
if (!sde_kms->irq_enabled)
irq_set_status_flags(sde_kms->irq_num, IRQ_NOAUTOEN);
}

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@ -111,6 +112,8 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
struct drm_mode_object *obj, u32 event, bool en);
static void sde_kms_handle_power_event(u32 event_type, void *usr);
bool sde_is_custom_client(void)
{
return sdecustom;
@ -2176,14 +2179,40 @@ static int sde_kms_postinit(struct msm_kms *kms)
struct sde_kms *sde_kms = to_sde_kms(kms);
struct drm_device *dev;
struct drm_crtc *crtc;
int rc;
struct drm_connector *conn;
struct drm_connector_list_iter conn_iter;
struct msm_drm_private *priv;
int i, rc;
if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev ||
!sde_kms->dev->dev_private) {
SDE_ERROR("invalid sde_kms\n");
return -EINVAL;
}
dev = sde_kms->dev;
priv = sde_kms->dev->dev_private;
/*
* Handle (re)initializations during power enable, the sde power
* event call has to be after drm_irq_install to handle irq update.
*/
sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
SDE_POWER_EVENT_POST_ENABLE |
SDE_POWER_EVENT_PRE_DISABLE,
sde_kms_handle_power_event, sde_kms, "kms");
if (sde_kms->splash_data.num_splash_displays) {
SDE_DEBUG("Skipping MDP Resources disable\n");
} else {
for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
sde_power_data_bus_set_quota(&priv->phandle, i,
SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
pm_runtime_put_sync(sde_kms->dev->dev);
}
rc = _sde_debugfs_init(sde_kms);
if (rc)
@ -2192,6 +2221,10 @@ static int sde_kms_postinit(struct msm_kms *kms)
drm_for_each_crtc(crtc, dev)
sde_crtc_post_init(dev, crtc);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter)
sde_connector_post_init(dev, conn);
drm_connector_list_iter_end(&conn_iter);
return rc;
}
@ -4680,7 +4713,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
struct drm_device *dev;
struct msm_drm_private *priv;
struct platform_device *platformdev;
int i, irq_num, rc = -EINVAL;
int irq_num, rc = -EINVAL;
if (!kms) {
SDE_ERROR("invalid kms\n");
@ -4729,26 +4762,6 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
/*
* Handle (re)initializations during power enable
*/
sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
SDE_POWER_EVENT_POST_ENABLE |
SDE_POWER_EVENT_PRE_DISABLE,
sde_kms_handle_power_event, sde_kms, "kms");
if (sde_kms->splash_data.num_splash_displays) {
SDE_DEBUG("Skipping MDP Resources disable\n");
} else {
for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
sde_power_data_bus_set_quota(&priv->phandle, i,
SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
pm_runtime_put_sync(sde_kms->dev->dev);
}
sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify;
sde_kms->affinity_notify.release = sde_kms_irq_affinity_release;
@ -4795,6 +4808,7 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
msm_kms_init(&sde_kms->base, &kms_funcs);
sde_kms->dev = dev;
sde_kms->irq_num = -1;
return &sde_kms->base;
}

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (C) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@ -1962,6 +1963,39 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane,
}
static int _sde_plane_validate_fb(struct sde_plane *psde,
struct drm_plane_state *state)
{
struct sde_plane_state *pstate;
struct drm_framebuffer *fb;
uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
unsigned long flags = 0;
int mode, ret = 0, n, i;
pstate = to_sde_plane_state(state);
mode = sde_plane_get_property(pstate,
PLANE_PROP_FB_TRANSLATION_MODE);
fb = state->fb;
n = fb->format->num_planes;
for (i = 0; i < n; i++) {
ret = msm_fb_obj_get_attrs(fb->obj[i], &fb_ns, &fb_sec,
&fb_sec_dir, &flags);
if (!ret && ((fb_ns && (mode != SDE_DRM_FB_NON_SEC)) ||
(fb_sec && (mode != SDE_DRM_FB_SEC)) ||
(fb_sec_dir && (mode != SDE_DRM_FB_SEC_DIR_TRANS)))) {
SDE_ERROR_PLANE(psde, "mode:%d fb:%d dma_buf flags:0x%x rc:%d\n",
mode, fb->base.id, flags, ret);
SDE_EVT32(psde->base.base.id, fb->base.id, flags,
fb_ns, fb_sec, fb_sec_dir, ret, SDE_EVTLOG_ERROR);
return -EINVAL;
}
}
return 0;
}
static void _sde_plane_sspp_atomic_check_mode_changed(struct sde_plane *psde,
struct drm_plane_state *state,
struct drm_plane_state *old_state)
@ -2631,6 +2665,11 @@ static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
return ret;
ret = _sde_plane_validate_shared_crtc(psde, state);
if (ret)
return ret;
ret = _sde_plane_validate_fb(psde, state);
if (ret)
return ret;
@ -3626,7 +3665,7 @@ static void _sde_plane_setup_capabilities_blob(struct sde_plane *psde,
sde_kms_info_add_keyint(info, "pipe_idx", pipe_id);
index = (master_plane_id == 0) ? 0 : 1;
if (catalog->has_demura &&
if (catalog->has_demura && psde->pipe < SSPP_MAX &&
catalog->demura_supported[psde->pipe][index] != ~0x0)
sde_kms_info_add_keyint(info, "demura_block", index);
@ -3716,7 +3755,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
psde->catalog = catalog;
is_master = !psde->is_virtual;
info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
info = vzalloc(sizeof(struct sde_kms_info));
if (!info) {
SDE_ERROR("failed to allocate info memory\n");
return;
@ -3795,7 +3834,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
ARRAY_SIZE(e_fb_translation_mode), 0,
PLANE_PROP_FB_TRANSLATION_MODE);
kfree(info);
vfree(info);
}
static inline void _sde_plane_set_csc_v1(struct sde_plane *psde,

View File

@ -111,6 +111,8 @@ struct sde_rm_requirements {
uint64_t top_ctrl;
const struct sde_rm_topology_def *topology;
struct sde_encoder_hw_resources hw_res;
u32 cwb_requested_disp_type;
u32 conn_lm_mask;
};
/**
@ -981,7 +983,8 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
struct sde_rm_hw_blk **dspp,
struct sde_rm_hw_blk **ds,
struct sde_rm_hw_blk **pp,
struct sde_rm_hw_blk *primary_lm)
struct sde_rm_hw_blk *primary_lm,
u32 conn_lm_mask)
{
const struct sde_lm_cfg *lm_cfg = to_sde_hw_mixer(lm->hw)->cap;
const struct sde_pingpong_cfg *pp_cfg;
@ -1038,6 +1041,13 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
SDE_DEBUG("fail: cwb supported lm not allocated\n");
return false;
}
if (lm_cfg->cwb_mask && RM_RQ_CWB(reqs) &&
!(BIT(ffs(conn_lm_mask) - 1) & lm_cfg->cwb_mask)) {
SDE_DEBUG("cwb mixer not compatible id:%d cwb_mask:0x%x lm_mask:0x%x\n",
lm_cfg->id, lm_cfg->cwb_mask, conn_lm_mask);
return false;
}
} else if ((!is_conn_primary && lm_primary_pref) ||
(!is_conn_secondary && lm_secondary_pref)) {
SDE_DEBUG(
@ -1083,7 +1093,7 @@ static int _sde_rm_reserve_lms(
struct sde_rm_hw_blk *ds[MAX_BLOCKS];
struct sde_rm_hw_blk *pp[MAX_BLOCKS];
struct sde_rm_hw_iter iter_i, iter_j;
u32 lm_mask = 0;
u32 lm_mask = 0, conn_lm_mask = 0;
int lm_count = 0;
int i, rc = 0;
@ -1092,6 +1102,9 @@ static int _sde_rm_reserve_lms(
return 0;
}
if (RM_RQ_CWB(reqs))
conn_lm_mask = reqs->conn_lm_mask;
/* Find a primary mixer */
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
while (lm_count != reqs->topology->num_lm &&
@ -1115,12 +1128,15 @@ static int _sde_rm_reserve_lms(
if (!_sde_rm_check_lm_and_get_connected_blks(
rm, rsvp, reqs, lm[lm_count],
&dspp[lm_count], &ds[lm_count],
&pp[lm_count], NULL))
&pp[lm_count], NULL, conn_lm_mask))
continue;
lm_mask |= (1 << iter_i.blk->id);
++lm_count;
if (RM_RQ_CWB(reqs))
conn_lm_mask = conn_lm_mask & ~BIT(ffs(conn_lm_mask) - 1);
/* Return if peer is not needed */
if (lm_count == reqs->topology->num_lm)
break;
@ -1140,7 +1156,7 @@ static int _sde_rm_reserve_lms(
if (!_sde_rm_check_lm_and_get_connected_blks(
rm, rsvp, reqs, iter_j.blk,
&dspp[lm_count], &ds[lm_count],
&pp[lm_count], iter_i.blk))
&pp[lm_count], iter_i.blk, conn_lm_mask))
continue;
SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
@ -1153,6 +1169,10 @@ static int _sde_rm_reserve_lms(
lm_mask |= (1 << iter_j.blk->id);
++lm_count;
if (RM_RQ_CWB(reqs))
conn_lm_mask = conn_lm_mask & ~BIT(ffs(conn_lm_mask) - 1);
break;
}
@ -2005,6 +2025,26 @@ int sde_rm_cont_splash_res_init(struct msm_drm_private *priv,
return 0;
}
static struct drm_connector *_sde_rm_get_connector(
struct drm_encoder *enc)
{
struct drm_connector *conn = NULL, *conn_search;
struct sde_connector *c_conn = NULL;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(enc->dev, &conn_iter);
drm_for_each_connector_iter(conn_search, &conn_iter) {
c_conn = to_sde_connector(conn_search);
if (c_conn->encoder == enc) {
conn = conn_search;
break;
}
}
drm_connector_list_iter_end(&conn_iter);
return conn;
}
static int _sde_rm_populate_requirements(
struct sde_rm *rm,
struct drm_encoder *enc,
@ -2013,6 +2053,8 @@ static int _sde_rm_populate_requirements(
struct sde_rm_requirements *reqs)
{
const struct drm_display_mode *mode = &crtc_state->mode;
struct drm_encoder *encoder_iter;
struct drm_connector *conn;
int i, num_lm;
reqs->top_ctrl = sde_connector_get_property(conn_state,
@ -2041,7 +2083,7 @@ static int _sde_rm_populate_requirements(
reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
sde_encoder_is_primary_display(enc))
(sde_encoder_get_display_type(enc) == SDE_CONNECTOR_PRIMARY))
reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DS);
}
@ -2075,8 +2117,22 @@ static int _sde_rm_populate_requirements(
reqs->topology->top_name, reqs->topology->num_ctl);
}
SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
reqs->hw_res.display_num_of_h_tiles);
if (RM_RQ_CWB(reqs)) {
drm_for_each_encoder_mask(encoder_iter, enc->dev,
crtc_state->encoder_mask) {
if (drm_encoder_mask(encoder_iter) == drm_encoder_mask(enc))
continue;
reqs->cwb_requested_disp_type = sde_encoder_get_display_type(encoder_iter);
conn = _sde_rm_get_connector(encoder_iter);
if (conn)
reqs->conn_lm_mask = to_sde_connector(conn)->lm_mask;
break;
}
}
SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d cwb_req_disp_type:%d\n", reqs->top_ctrl,
reqs->hw_res.display_num_of_h_tiles, reqs->cwb_requested_disp_type);
SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
reqs->topology->num_lm, reqs->topology->num_ctl,
reqs->topology->top_name,
@ -2120,26 +2176,6 @@ static struct sde_rm_rsvp *_sde_rm_get_rsvp_cur(struct sde_rm *rm,
return _sde_rm_get_rsvp(rm, enc, false);
}
static struct drm_connector *_sde_rm_get_connector(
struct drm_encoder *enc)
{
struct drm_connector *conn = NULL, *conn_search;
struct sde_connector *c_conn = NULL;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(enc->dev, &conn_iter);
drm_for_each_connector_iter(conn_search, &conn_iter) {
c_conn = to_sde_connector(conn_search);
if (c_conn->encoder == enc) {
conn = conn_search;
break;
}
}
drm_connector_list_iter_end(&conn_iter);
return conn;
}
int sde_rm_update_topology(struct sde_rm *rm,
struct drm_connector_state *conn_state,
struct msm_display_topology *topology)

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2009-2021, The Linux Foundation. All rights reserved.
*/
@ -228,6 +229,7 @@ struct sde_dbg_regbuf {
* struct sde_dbg_base - global sde debug base structure
* @evtlog: event log instance
* @reglog: reg log instance
* @reg_dump_base: base address of register dump region
* @reg_base_list: list of register dumping regions
* @dev: device pointer
* @mutex: mutex to serialize access to serialze dumps, debugfs access
@ -253,6 +255,7 @@ struct sde_dbg_base {
struct sde_dbg_evtlog *evtlog;
struct sde_dbg_reglog *reglog;
struct list_head reg_base_list;
void *reg_dump_base;
void *reg_dump_addr;
struct device *dev;
struct mutex mutex;
@ -959,6 +962,9 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
u32 bus_size;
char name[20];
if (!bus || !bus->cmn.entries_size)
return;
reg_base = _sde_dump_get_blk_addr(bus->cmn.name);
if (!reg_base || !reg_base->base) {
pr_err("unable to find mem_base for %s\n", bus->cmn.name);
@ -989,7 +995,7 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
if (in_mem && (!(*dump_mem))) {
*dump_mem = devm_kzalloc(sde_dbg_base.dev, list_size, GFP_KERNEL);
*dump_mem = vzalloc(list_size);
bus->cmn.content_size = list_size / sizeof(u32);
}
@ -1037,7 +1043,7 @@ static void _sde_dbg_dump_dsi_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
mutex_lock(&sde_dbg_dsi_mutex);
in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
if (in_mem && (!(*dump_mem))) {
*dump_mem = devm_kzalloc(sde_dbg_base.dev, list_size, GFP_KERNEL);
*dump_mem = vzalloc(list_size);
bus->cmn.content_size = list_size / sizeof(u32);
}
@ -1083,8 +1089,10 @@ static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
mutex_lock(&sde_dbg_base.mutex);
reg_dump_size = _sde_dbg_get_reg_dump_size();
dbg_base->reg_dump_addr = devm_kzalloc(sde_dbg_base.dev,
reg_dump_size, GFP_KERNEL);
if (!dbg_base->reg_dump_base)
dbg_base->reg_dump_base = vzalloc(reg_dump_size);
dbg_base->reg_dump_addr = dbg_base->reg_dump_base;
if (!dbg_base->reg_dump_addr)
pr_err("Failed to allocate memory for reg_dump_addr size:%d\n",
@ -1624,7 +1632,7 @@ static ssize_t sde_recovery_regdump_read(struct file *file, char __user *ubuf,
if (!rbuf->dump_done && !rbuf->cur_blk) {
if (!rbuf->buf)
rbuf->buf = kzalloc(DUMP_BUF_SIZE, GFP_KERNEL);
rbuf->buf = vzalloc(DUMP_BUF_SIZE);
if (!rbuf->buf) {
len = -ENOMEM;
goto err;
@ -2399,6 +2407,7 @@ static void sde_dbg_reg_base_destroy(void)
list_del(&blk_base->reg_base_head);
kfree(blk_base);
}
vfree(dbg_base->reg_dump_base);
}
static void sde_dbg_dsi_ctrl_destroy(void)
@ -2413,12 +2422,22 @@ static void sde_dbg_dsi_ctrl_destroy(void)
mutex_unlock(&sde_dbg_dsi_mutex);
}
static void sde_dbg_buses_destroy(void)
{
struct sde_dbg_base *dbg_base = &sde_dbg_base;
vfree(dbg_base->dbgbus_sde.cmn.dumped_content);
vfree(dbg_base->dbgbus_vbif_rt.cmn.dumped_content);
vfree(dbg_base->dbgbus_dsi.cmn.dumped_content);
vfree(dbg_base->dbgbus_lutdma.cmn.dumped_content);
}
/**
* sde_dbg_destroy - destroy sde debug facilities
*/
void sde_dbg_destroy(void)
{
kfree(sde_dbg_base.regbuf.buf);
vfree(sde_dbg_base.regbuf.buf);
memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
_sde_dbg_debugfs_destroy();
sde_dbg_base_evtlog = NULL;
@ -2428,6 +2447,7 @@ void sde_dbg_destroy(void)
sde_dbg_base.reglog = NULL;
sde_dbg_reg_base_destroy();
sde_dbg_dsi_ctrl_destroy();
sde_dbg_buses_destroy();
mutex_destroy(&sde_dbg_base.mutex);
}

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@ -219,7 +220,7 @@ struct sde_dbg_evtlog *sde_evtlog_init(void)
{
struct sde_dbg_evtlog *evtlog;
evtlog = kzalloc(sizeof(*evtlog), GFP_KERNEL);
evtlog = vzalloc(sizeof(*evtlog));
if (!evtlog)
return ERR_PTR(-ENOMEM);
@ -239,7 +240,7 @@ struct sde_dbg_reglog *sde_reglog_init(void)
{
struct sde_dbg_reglog *reglog;
reglog = kzalloc(sizeof(*reglog), GFP_KERNEL);
reglog = vzalloc(sizeof(*reglog));
if (!reglog)
return ERR_PTR(-ENOMEM);
@ -350,7 +351,7 @@ void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
list_del(&filter_node->list);
kfree(filter_node);
}
kfree(evtlog);
vfree(evtlog);
}
void sde_reglog_destroy(struct sde_dbg_reglog *reglog)
@ -358,5 +359,5 @@ void sde_reglog_destroy(struct sde_dbg_reglog *reglog)
if (!reglog)
return;
kfree(reglog);
vfree(reglog);
}

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
*/
@ -455,6 +456,40 @@ static int sde_power_mnoc_bus_parse(struct platform_device *pdev,
return rc;
}
static void sde_power_parse_ib_votes(struct platform_device *pdev,
struct sde_power_handle *phandle)
{
int rc = 0;
u32 tmp = 0;
if (!pdev || !phandle) {
pr_err("invalid input param pdev:%pK phandle:%pK\n", pdev,
phandle);
return;
}
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,sde-min-core-ib-kbps", &tmp);
if (rc)
pr_err("error reading min core ib vote. rc=%d, np=%x\n", rc, pdev->dev.of_node);
phandle->min_ib_vote.min_core_ib = (!rc ? tmp*1000 : 0);
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,sde-min-llcc-ib-kbps", &tmp);
if (rc)
pr_err("error reading min llcc ib vote. rc=%d\n", rc);
phandle->min_ib_vote.min_llcc_ib = (!rc ? tmp*1000 : 0);
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,sde-min-dram-ib-kbps", &tmp);
if (rc)
pr_err("error reading min dram ib vote. rc=%d\n", rc);
phandle->min_ib_vote.min_dram_ib = (!rc ? tmp*1000 : 0);
}
static int sde_power_bus_parse(struct platform_device *pdev,
struct sde_power_handle *phandle)
{
@ -599,6 +634,8 @@ int sde_power_resource_init(struct platform_device *pdev,
goto bus_err;
}
sde_power_parse_ib_votes(pdev, phandle);
INIT_LIST_HEAD(&phandle->event_list);
phandle->rsc_client = NULL;
@ -695,6 +732,7 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, bool enable)
{
int rc = 0, i = 0;
struct dss_module_power *mp;
u32 bus_ib_quota = 0;
if (!phandle) {
pr_err("invalid input argument\n");
@ -718,10 +756,26 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, bool enable)
for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX &&
phandle->data_bus_handle[i].data_paths_cnt > 0; i++) {
bus_ib_quota = 0;
switch (i) {
case SDE_POWER_HANDLE_DBUS_ID_MNOC:
bus_ib_quota =
phandle->min_ib_vote.min_core_ib;
break;
case SDE_POWER_HANDLE_DBUS_ID_LLCC:
bus_ib_quota =
phandle->min_ib_vote.min_llcc_ib;
break;
case SDE_POWER_HANDLE_DBUS_ID_EBI:
bus_ib_quota =
phandle->min_ib_vote.min_dram_ib;
}
rc = _sde_power_data_bus_set_quota(
&phandle->data_bus_handle[i],
SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
bus_ib_quota);
if (rc) {
pr_err("failed to set data bus vote id=%d rc=%d\n",
i, rc);

View File

@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
@ -125,6 +126,18 @@ struct sde_power_reg_bus_handle {
struct sde_power_bus_scaling_data scale_table[VOTE_INDEX_MAX];
};
/**
* struct sde_min_ib_vote: ib votes on data bus
* @min_core_ib: ib vote on mnoc
* @min_llcc_ib: ib vote on llcc
* @min_dram_ib: ib vote on dram
*/
struct sde_min_ib_vote {
u32 min_core_ib;
u32 min_llcc_ib;
u32 min_dram_ib;
};
/*
* struct sde_power_event - local event registration structure
* @client_name: name of the client registering
@ -165,6 +178,7 @@ struct sde_power_handle {
u32 last_event_handled;
struct sde_rsc_client *rsc_client;
bool rsc_client_init;
struct sde_min_ib_vote min_ib_vote;
};
/**

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[sde_rsc:%s:%d]: " fmt, __func__, __LINE__
@ -331,11 +332,7 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
line_time_ns = div_u64(line_time_ns, rsc->cmd_config.vtotal);
prefill_time_ns = line_time_ns * rsc->cmd_config.prefill_lines;
/* only take jitter into account for CMD mode */
if (state == SDE_RSC_CMD_STATE)
total = frame_time_ns - frame_jitter - prefill_time_ns;
else
total = frame_time_ns - prefill_time_ns;
total = frame_time_ns - frame_jitter - prefill_time_ns;
if (total < 0) {
pr_err("invalid total time period time:%llu jiter_time:%llu blanking time:%llu\n",
@ -352,6 +349,8 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
line_time_ns, prefill_time_ns);
pr_debug("static wakeup time:%lld cxo:%u\n", total, cxo_period_ns);
SDE_EVT32(rsc->cmd_config.fps, rsc->cmd_config.vtotal, total);
pdc_backoff_time_ns = rsc_backoff_time_ns;
rsc_backoff_time_ns = div_u64(rsc_backoff_time_ns, cxo_period_ns);
rsc->timer_config.rsc_backoff_time_ns = (u32) rsc_backoff_time_ns;
@ -852,6 +851,33 @@ bool sde_rsc_client_is_state_update_complete(
return vsync_timestamp0 != 0;
}
static int sde_rsc_hw_init(struct sde_rsc_priv *rsc)
{
int ret;
ret = regulator_enable(rsc->fs);
if (ret) {
pr_err("sde rsc: fs on failed ret:%d\n", ret);
goto sde_rsc_fail;
}
rsc->sw_fs_enabled = true;
ret = sde_rsc_resource_enable(rsc);
if (ret < 0) {
pr_err("failed to enable sde rsc power resources rc:%d\n", ret);
goto sde_rsc_fail;
}
if (sde_rsc_timer_calculate(rsc, NULL, SDE_RSC_IDLE_STATE))
goto sde_rsc_fail;
sde_rsc_resource_disable(rsc);
sde_rsc_fail:
return ret;
}
/**
* sde_rsc_client_state_update() - rsc client state update
* Video mode, cmd mode and clk state are suppoed as modes. A client need to
@ -902,6 +928,13 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
__builtin_return_address(0), rsc->current_state,
caller_client->name, state);
/* hw init is required after hibernation */
if (rsc->hw_reinit && rsc->need_hwinit &&
state != SDE_RSC_IDLE_STATE) {
sde_rsc_hw_init(rsc);
rsc->need_hwinit = false;
}
/**
* This can only happen if splash is active or qsync is enabled.
* In both cases timers need to be updated for when a transition to
@ -1681,6 +1714,9 @@ static int sde_rsc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rsc);
rsc->dev = &pdev->dev;
rsc->hw_reinit = of_property_read_bool(pdev->dev.of_node,
"qcom,sde-rsc-need-hw-reinit");
of_property_read_u32(pdev->dev.of_node, "qcom,sde-rsc-version",
&rsc->version);
@ -1748,25 +1784,12 @@ static int sde_rsc_probe(struct platform_device *pdev)
goto sde_rsc_fail;
}
ret = regulator_enable(rsc->fs);
ret = sde_rsc_hw_init(rsc);
if (ret) {
pr_err("sde rsc: fs on failed ret:%d\n", ret);
pr_err("sde rsc: hw init failed ret:%d\n", ret);
goto sde_rsc_fail;
}
rsc->sw_fs_enabled = true;
ret = sde_rsc_resource_enable(rsc);
if (ret < 0) {
pr_err("failed to enable sde rsc power resources rc:%d\n", ret);
goto sde_rsc_fail;
}
if (sde_rsc_timer_calculate(rsc, NULL, SDE_RSC_IDLE_STATE))
goto sde_rsc_fail;
sde_rsc_resource_disable(rsc);
INIT_LIST_HEAD(&rsc->client_list);
INIT_LIST_HEAD(&rsc->event_list);
mutex_init(&rsc->client_lock);
@ -1794,6 +1817,20 @@ rsc_alloc_fail:
return ret;
}
static int sde_rsc_pm_freeze_late(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct sde_rsc_priv *rsc = platform_get_drvdata(pdev);
rsc->need_hwinit = true;
return 0;
}
static const struct dev_pm_ops sde_rsc_pm_ops = {
.freeze_late = sde_rsc_pm_freeze_late,
};
static int sde_rsc_remove(struct platform_device *pdev)
{
struct sde_rsc_priv *rsc = platform_get_drvdata(pdev);
@ -1841,6 +1878,7 @@ static struct platform_driver sde_rsc_platform_driver = {
.driver = {
.name = "sde_rsc",
.of_match_table = dt_match,
.pm = &sde_rsc_pm_ops,
.suppress_bind_attrs = true,
},
};

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _SDE_RSC_PRIV_H_
@ -154,6 +155,8 @@ struct sde_rsc_bw_config {
* @phandle: module power handle for clocks
* @fs: "MDSS GDSC" handle
* @sw_fs_enabled: track "MDSS GDSC" sw vote during probe
* @need_hwinit: rsc hw init is required for the next update
* @hw_reinit: rsc hw reinit support enable
*
* @rpmh_dev: rpmh device node
* @drv_io: sde drv io data mapping
@ -201,6 +204,8 @@ struct sde_rsc_priv {
struct sde_power_handle phandle;
struct regulator *fs;
bool sw_fs_enabled;
bool need_hwinit;
bool hw_reinit;
struct device *rpmh_dev;
struct dss_io_data drv_io;

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, 2015-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2012, 2015-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
@ -79,7 +79,7 @@ u32 sde_apply_comp_ratio_factor(u32 quota,
return quota;
}
#define RES_1080p (1088*1920)
#define RES_1080p (1088*1920)
#define RES_UHD (3840*2160)
#define RES_WQXGA (2560*1600)
#define XIN_HALT_TIMEOUT_US 0x4000
@ -507,10 +507,10 @@ int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
}
if (ret) {
pr_err("rotator: reg_bus_hdl set failed ab=%llu, ib=%llu\n",
SDEROT_ERR("rotator: reg_bus_hdl set failed ab=%llu, ib=%llu\n",
reg_bus_value->ab, reg_bus_value->ib);
if (sde_res->reg_bus_usecase_ndx == VOTE_INDEX_DISABLE)
pr_err("rotator: reg_bus_hdl was disabled\n");
SDEROT_ERR("rotator: reg_bus_hdl was disabled\n");
} else {
sde_res->reg_bus_usecase_ndx = max_usecase_ndx;
}
@ -841,7 +841,7 @@ static int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
mdata->reg_bus_hdl = of_icc_get(&mdata->pdev->dev, "qcom,sde-reg-bus");
if (mdata->reg_bus_hdl == NULL) {
pr_err("rotator: reg bus dt node missing\n");
SDEROT_ERR("rotator: reg bus dt node missing\n");
return 0;
} else if (IS_ERR(mdata->reg_bus_hdl)) {
SDEROT_ERR("reg bus handle parsing failed\n");

View File

@ -406,6 +406,168 @@ static const u32 sde_hw_rotator_v4_outpixfmts[] = {
SDE_PIX_FMT_XBGR_2101010_TILE,
};
static const u32 sde_hw_rotator_v5_inpixfmts[] = {
SDE_PIX_FMT_XRGB_8888,
SDE_PIX_FMT_ARGB_8888,
SDE_PIX_FMT_ABGR_8888,
SDE_PIX_FMT_RGBA_8888,
SDE_PIX_FMT_BGRA_8888,
SDE_PIX_FMT_RGBX_8888,
SDE_PIX_FMT_BGRX_8888,
SDE_PIX_FMT_XBGR_8888,
SDE_PIX_FMT_RGBA_5551,
SDE_PIX_FMT_ARGB_1555,
SDE_PIX_FMT_ABGR_1555,
SDE_PIX_FMT_BGRA_5551,
SDE_PIX_FMT_BGRX_5551,
SDE_PIX_FMT_RGBX_5551,
SDE_PIX_FMT_XBGR_1555,
SDE_PIX_FMT_XRGB_1555,
SDE_PIX_FMT_ARGB_4444,
SDE_PIX_FMT_RGBA_4444,
SDE_PIX_FMT_BGRA_4444,
SDE_PIX_FMT_ABGR_4444,
SDE_PIX_FMT_RGBX_4444,
SDE_PIX_FMT_XRGB_4444,
SDE_PIX_FMT_BGRX_4444,
SDE_PIX_FMT_XBGR_4444,
SDE_PIX_FMT_RGB_888,
SDE_PIX_FMT_BGR_888,
SDE_PIX_FMT_RGB_565,
SDE_PIX_FMT_BGR_565,
SDE_PIX_FMT_Y_CB_CR_H2V2,
SDE_PIX_FMT_Y_CR_CB_H2V2,
SDE_PIX_FMT_Y_CR_CB_GH2V2,
SDE_PIX_FMT_Y_CBCR_H2V2,
SDE_PIX_FMT_Y_CRCB_H2V2,
SDE_PIX_FMT_Y_CBCR_H1V2,
SDE_PIX_FMT_Y_CRCB_H1V2,
SDE_PIX_FMT_Y_CBCR_H2V1,
SDE_PIX_FMT_Y_CRCB_H2V1,
SDE_PIX_FMT_YCBYCR_H2V1,
SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
/* SDE_PIX_FMT_RGBA_8888_UBWC */
/* SDE_PIX_FMT_RGBX_8888_UBWC */
/* SDE_PIX_FMT_RGB_565_UBWC */
/* SDE_PIX_FMT_Y_CBCR_H2V2_UBWC */
SDE_PIX_FMT_RGBA_1010102,
SDE_PIX_FMT_RGBX_1010102,
SDE_PIX_FMT_ARGB_2101010,
SDE_PIX_FMT_XRGB_2101010,
SDE_PIX_FMT_BGRA_1010102,
SDE_PIX_FMT_BGRX_1010102,
SDE_PIX_FMT_ABGR_2101010,
SDE_PIX_FMT_XBGR_2101010,
/* SDE_PIX_FMT_RGBA_1010102_UBWC */
/* SDE_PIX_FMT_RGBX_1010102_UBWC */
SDE_PIX_FMT_Y_CBCR_H2V2_P010,
SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
/* SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC */
/* SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC */
SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
SDE_PIX_FMT_XRGB_8888_TILE,
SDE_PIX_FMT_ARGB_8888_TILE,
SDE_PIX_FMT_ABGR_8888_TILE,
SDE_PIX_FMT_XBGR_8888_TILE,
SDE_PIX_FMT_RGBA_8888_TILE,
SDE_PIX_FMT_BGRA_8888_TILE,
SDE_PIX_FMT_RGBX_8888_TILE,
SDE_PIX_FMT_BGRX_8888_TILE,
SDE_PIX_FMT_RGBA_1010102_TILE,
SDE_PIX_FMT_RGBX_1010102_TILE,
SDE_PIX_FMT_ARGB_2101010_TILE,
SDE_PIX_FMT_XRGB_2101010_TILE,
SDE_PIX_FMT_BGRA_1010102_TILE,
SDE_PIX_FMT_BGRX_1010102_TILE,
SDE_PIX_FMT_ABGR_2101010_TILE,
SDE_PIX_FMT_XBGR_2101010_TILE,
};
static const u32 sde_hw_rotator_v5_outpixfmts[] = {
SDE_PIX_FMT_XRGB_8888,
SDE_PIX_FMT_ARGB_8888,
SDE_PIX_FMT_ABGR_8888,
SDE_PIX_FMT_RGBA_8888,
SDE_PIX_FMT_BGRA_8888,
SDE_PIX_FMT_RGBX_8888,
SDE_PIX_FMT_BGRX_8888,
SDE_PIX_FMT_XBGR_8888,
SDE_PIX_FMT_RGBA_5551,
SDE_PIX_FMT_ARGB_1555,
SDE_PIX_FMT_ABGR_1555,
SDE_PIX_FMT_BGRA_5551,
SDE_PIX_FMT_BGRX_5551,
SDE_PIX_FMT_RGBX_5551,
SDE_PIX_FMT_XBGR_1555,
SDE_PIX_FMT_XRGB_1555,
SDE_PIX_FMT_ARGB_4444,
SDE_PIX_FMT_RGBA_4444,
SDE_PIX_FMT_BGRA_4444,
SDE_PIX_FMT_ABGR_4444,
SDE_PIX_FMT_RGBX_4444,
SDE_PIX_FMT_XRGB_4444,
SDE_PIX_FMT_BGRX_4444,
SDE_PIX_FMT_XBGR_4444,
SDE_PIX_FMT_RGB_888,
SDE_PIX_FMT_BGR_888,
SDE_PIX_FMT_RGB_565,
SDE_PIX_FMT_BGR_565,
/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
SDE_PIX_FMT_Y_CBCR_H2V2,
SDE_PIX_FMT_Y_CRCB_H2V2,
SDE_PIX_FMT_Y_CBCR_H1V2,
SDE_PIX_FMT_Y_CRCB_H1V2,
SDE_PIX_FMT_Y_CBCR_H2V1,
SDE_PIX_FMT_Y_CRCB_H2V1,
/* SDE_PIX_FMT_YCBYCR_H2V1 */
SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
/* SDE_PIX_FMT_RGBA_8888_UBWC */
/* SDE_PIX_FMT_RGBX_8888_UBWC */
/* SDE_PIX_FMT_RGB_565_UBWC */
/* SDE_PIX_FMT_Y_CBCR_H2V2_UBWC */
SDE_PIX_FMT_RGBA_1010102,
SDE_PIX_FMT_RGBX_1010102,
SDE_PIX_FMT_ARGB_2101010,
SDE_PIX_FMT_XRGB_2101010,
SDE_PIX_FMT_BGRA_1010102,
SDE_PIX_FMT_BGRX_1010102,
SDE_PIX_FMT_ABGR_2101010,
SDE_PIX_FMT_XBGR_2101010,
/* SDE_PIX_FMT_RGBA_1010102_UBWC */
/* SDE_PIX_FMT_RGBX_1010102_UBWC */
SDE_PIX_FMT_Y_CBCR_H2V2_P010,
SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
/* SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC */
/* SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC */
SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
SDE_PIX_FMT_XRGB_8888_TILE,
SDE_PIX_FMT_ARGB_8888_TILE,
SDE_PIX_FMT_ABGR_8888_TILE,
SDE_PIX_FMT_XBGR_8888_TILE,
SDE_PIX_FMT_RGBA_8888_TILE,
SDE_PIX_FMT_BGRA_8888_TILE,
SDE_PIX_FMT_RGBX_8888_TILE,
SDE_PIX_FMT_BGRX_8888_TILE,
SDE_PIX_FMT_RGBA_1010102_TILE,
SDE_PIX_FMT_RGBX_1010102_TILE,
SDE_PIX_FMT_ARGB_2101010_TILE,
SDE_PIX_FMT_XRGB_2101010_TILE,
SDE_PIX_FMT_BGRA_1010102_TILE,
SDE_PIX_FMT_BGRX_1010102_TILE,
SDE_PIX_FMT_ABGR_2101010_TILE,
SDE_PIX_FMT_XBGR_2101010_TILE,
};
static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
SDE_PIX_FMT_Y_CBCR_H2V2_P010,
SDE_PIX_FMT_Y_CBCR_H2V2,
@ -3507,13 +3669,13 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
sde_hw_rotator_v4_inpixfmts;
sde_hw_rotator_v5_inpixfmts;
rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
ARRAY_SIZE(sde_hw_rotator_v5_inpixfmts);
rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
sde_hw_rotator_v4_outpixfmts;
sde_hw_rotator_v5_outpixfmts;
rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
ARRAY_SIZE(sde_hw_rotator_v5_outpixfmts);
rot->downscale_caps =
"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
} else {

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
@ -249,11 +249,13 @@ void sde_rotator_resync_timeline(struct sde_rot_timeline *tl)
spin_lock_irqsave(&tl->lock, flags);
val = tl->next_value - tl->curr_value;
if (val > 0) {
SDEROT_WARN("flush %s:%d\n", tl->name, val);
if (val > 0)
sde_rotator_inc_timeline_locked(tl, val);
}
spin_unlock_irqrestore(&tl->lock, flags);
if (val > 0)
SDEROT_WARN("flush %s:%d\n", tl->name, val);
}
/*

View File

@ -918,14 +918,22 @@ static const struct drm_mode_config_funcs qpic_mode_config_funcs = {
static void qpic_display_fb_mark_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
u32 size;
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
struct drm_gem_cma_object *cma_obj = NULL;
struct dma_buf_attachment *import_attach = NULL;
struct qpic_display_data *qpic_display = fb->dev->dev_private;
if (!qpic_display->is_qpic_on || !qpic_display->is_panel_on) {
pr_info("%s: qpic or panel is not enabled\n", __func__);
return;
}
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
pr_err("failed to get gem obj\n");
return;
}
import_attach = cma_obj->base.import_attach;
/* currently QPIC display SW can't support partial updates */
rect->x1 = 0;
rect->x2 = fb->width;
@ -1194,7 +1202,7 @@ int qpic_display_get_resource(struct qpic_display_data *qpic_display)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qpic_base");
if (!res) {
pr_err("unable to get QPIC reg base address\n");
rc = -ENOMEM;
return -ENOMEM;
}
qpic_display->qpic_reg_size = resource_size(res);
@ -1202,7 +1210,7 @@ int qpic_display_get_resource(struct qpic_display_data *qpic_display)
qpic_display->qpic_reg_size);
if (unlikely(!qpic_display->qpic_base)) {
pr_err("unable to map MDSS QPIC base\n");
rc = -ENOMEM;
return -ENOMEM;
}
qpic_display->qpic_phys = res->start;
pr_info("MDSS QPIC HW Base phy_Address=0x%x virt=0x%x\n",
@ -1212,7 +1220,7 @@ int qpic_display_get_resource(struct qpic_display_data *qpic_display)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
pr_err("unable to get QPIC irq\n");
rc = -ENODEV;
return -ENODEV;
}
qpic_display->qpic_clk = devm_clk_get(&pdev->dev, "core_clk");

View File

@ -132,6 +132,6 @@ struct qpic_display_data {
};
int get_ili_qvga_panel_config(struct qpic_display_data *qpic_display);
void get_ili_qvga_panel_config(struct qpic_display_data *qpic_display);
#endif

View File

@ -76,7 +76,7 @@ static struct qpic_panel_config ili_qvga_panel = {
.bpp = 16,
};
int get_ili_qvga_panel_config(struct qpic_display_data *qpic_display)
void get_ili_qvga_panel_config(struct qpic_display_data *qpic_display)
{
qpic_display->panel_config = &ili_qvga_panel;
qpic_display->panel_on = ili9341_on;