msm: cvp: Fixed a deadlock during power update

Replace the per-dyn_clk mutex and the coarse core mutex with a dedicated clk_lock mutex for clock/bandwidth updates, avoiding the deadlock.

Change-Id: I01a5aaba6f461255861f09e75c992d22217ae66a
Signed-off-by: George Shen <sqiao@codeaurora.org>
This commit is contained in:
George Shen 2020-08-17 12:05:22 -07:00
parent a63da6461e
commit 81c40bf201
5 changed files with 56 additions and 31 deletions

View File

@ -115,7 +115,6 @@ static void init_cycle_info(struct cvp_cycle_info *info)
memset(info->cycle, 0,
HFI_MAX_HW_THREADS*sizeof(struct cvp_cycle_stat));
info->conf_freq = 0;
mutex_init(&info->lock);
}
static int msm_cvp_initialize_core(struct platform_device *pdev,
@ -134,6 +133,7 @@ static int msm_cvp_initialize_core(struct platform_device *pdev,
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
mutex_init(&core->clk_lock);
core->state = CVP_CORE_UNINIT;
for (i = SYS_MSG_INDEX(SYS_MSG_START);
@ -502,7 +502,7 @@ static int msm_cvp_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &msm_cvp_core_attr_group);
dev_set_drvdata(&pdev->dev, NULL);
mutex_destroy(&core->lock);
mutex_destroy(&core->dyn_clk.lock);
mutex_destroy(&core->clk_lock);
kfree(core);
return rc;
}

View File

@ -376,7 +376,7 @@ static int cvp_check_clock(struct msm_cvp_inst *inst,
__func__, fw_cycles, hw_cycles[0],
hw_cycles[1], hw_cycles[2], hw_cycles[3]);
mutex_lock(&core->dyn_clk.lock);
mutex_lock(&core->clk_lock);
for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
dprintk(CVP_PWR, "%s - %d: hw_cycles %u, tens_thresh %u\n",
__func__, i, hw_cycles[i],
@ -437,7 +437,7 @@ static int cvp_check_clock(struct msm_cvp_inst *inst,
}
}
}
mutex_unlock(&core->dyn_clk.lock);
mutex_unlock(&core->clk_lock);
return rc;
}
@ -861,6 +861,8 @@ static void aggregate_power_update(struct msm_cvp_core *core,
* Clock vote from realtime session will be hard request. If aggregated
* session clock request exceeds max limit, the function will return
* error.
*
* Ensure caller acquires clk_lock!
*/
static int adjust_bw_freqs(void)
{
@ -943,7 +945,6 @@ static int adjust_bw_freqs(void)
}
ctrl_freq = (core->curr_freq*3)>>1;
mutex_lock(&core->dyn_clk.lock);
core->dyn_clk.conf_freq = core->curr_freq;
for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
core->dyn_clk.hi_ctrl_lim[i] = core->dyn_clk.sum_fps[i] ?
@ -951,7 +952,6 @@ static int adjust_bw_freqs(void)
core->dyn_clk.lo_ctrl_lim[i] =
core->dyn_clk.hi_ctrl_lim[i];
}
mutex_unlock(&core->dyn_clk.lock);
hdev->clk_freq = core->curr_freq;
rc = icc_set_bw(bus->client, bw_sum, 0);
@ -980,9 +980,9 @@ static int msm_cvp_update_power(struct msm_cvp_inst *inst)
inst->cur_cmd_type = CVP_KMD_UPDATE_POWER;
core = inst->core;
mutex_lock(&core->lock);
mutex_lock(&core->clk_lock);
rc = adjust_bw_freqs();
mutex_unlock(&core->lock);
mutex_unlock(&core->clk_lock);
inst->cur_cmd_type = 0;
cvp_put_inst(s);

View File

@ -9,6 +9,23 @@
#include "msm_cvp_core.h"
#include "msm_cvp_dsp.h"
#define CLEAR_USE_BITMAP(idx, inst) \
do { \
clear_bit(idx, &inst->dma_cache.usage_bitmap); \
dprintk(CVP_MEM, "clear %x bit %d dma_cache bitmap 0x%llx\n", \
hash32_ptr(inst->session), smem->bitmap_index, \
inst->dma_cache.usage_bitmap); \
} while (0)
#define SET_USE_BITMAP(idx, inst) \
do { \
set_bit(idx, &inst->dma_cache.usage_bitmap); \
dprintk(CVP_MEM, "Set %x bit %d dma_cache bitmap 0x%llx\n", \
hash32_ptr(inst->session), idx, \
inst->dma_cache.usage_bitmap); \
} while (0)
void print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
struct msm_cvp_smem *smem)
{
@ -16,9 +33,11 @@ void print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
return;
if (smem->dma_buf) {
dprintk(tag, "%s: %x : %s size %d flags %#x iova %#x", str,
hash32_ptr(inst->session), smem->dma_buf->name,
smem->size, smem->flags, smem->device_addr);
dprintk(tag,
"%s: %x : %s size %d flags %#x iova %#x idx %d ref %d",
str, hash32_ptr(inst->session), smem->dma_buf->name,
smem->size, smem->flags, smem->device_addr,
smem->bitmap_index, smem->refcount);
}
}
@ -126,7 +145,7 @@ int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct cvp_kmd_buffer *buf)
smem->dma_buf = dma_buf;
smem->bitmap_index = MAX_DMABUF_NUMS;
dprintk(CVP_DSP, "%s: dma_buf = %llx\n", __func__, dma_buf);
dprintk(CVP_MEM, "%s: dma_buf = %llx\n", __func__, dma_buf);
rc = msm_cvp_map_smem(inst, smem, "map dsp");
if (rc) {
print_client_buffer(CVP_ERR, "map failed", inst, buf);
@ -274,7 +293,7 @@ static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
mutex_lock(&inst->dma_cache.lock);
for (i = 0; i < inst->dma_cache.nr; i++)
if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
set_bit(i, &inst->dma_cache.usage_bitmap);
SET_USE_BITMAP(i, inst);
smem = inst->dma_cache.entries[i];
smem->bitmap_index = i;
atomic_inc(&smem->refcount);
@ -303,7 +322,7 @@ static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
mutex_lock(&inst->dma_cache.lock);
if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
inst->dma_cache.entries[inst->dma_cache.nr] = smem;
set_bit(inst->dma_cache.nr, &inst->dma_cache.usage_bitmap);
SET_USE_BITMAP(inst->dma_cache.nr, inst);
smem->bitmap_index = inst->dma_cache.nr;
inst->dma_cache.nr++;
i = smem->bitmap_index;
@ -318,7 +337,7 @@ static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
inst->dma_cache.entries[i] = smem;
smem->bitmap_index = i;
set_bit(i, &inst->dma_cache.usage_bitmap);
SET_USE_BITMAP(i, inst);
} else {
dprintk(CVP_WARN, "%s: not enough memory\n", __func__);
mutex_unlock(&inst->dma_cache.lock);
@ -495,13 +514,14 @@ static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
dma_buf_put(smem->dma_buf);
kmem_cache_free(cvp_driver->smem_cache, smem);
buf->smem = NULL;
} else if (atomic_dec_and_test(&smem->refcount)) {
clear_bit(smem->bitmap_index,
&inst->dma_cache.usage_bitmap);
dprintk(CVP_MEM, "smem %x %d iova %#x to be reused\n",
hash32_ptr(inst->session),
smem->size,
smem->device_addr);
} else {
mutex_lock(&inst->dma_cache.lock);
if (atomic_dec_and_test(&smem->refcount)) {
CLEAR_USE_BITMAP(smem->bitmap_index, inst);
print_smem(CVP_MEM, "Map dereference",
inst, smem);
}
mutex_unlock(&inst->dma_cache.lock);
}
}
@ -550,7 +570,7 @@ int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
struct msm_cvp_smem *smem = NULL;
if (!offset || !buf_num)
return 0;
return rc;
cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
ktid = cmd_hdr->client_data.kdata & (FENCE_BIT - 1);
@ -574,10 +594,13 @@ int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
cvp_driver->smem_cache,
smem);
pbuf->smem = NULL;
} else if (atomic_dec_and_test(
&smem->refcount)) {
clear_bit(smem->bitmap_index,
&inst->dma_cache.usage_bitmap);
} else {
mutex_lock(&inst->dma_cache.lock);
if (atomic_dec_and_test(&smem->refcount))
CLEAR_USE_BITMAP(
smem->bitmap_index,
inst);
mutex_unlock(&inst->dma_cache.lock);
}
kmem_cache_free(cvp_driver->buf_cache, pbuf);

View File

@ -218,7 +218,6 @@ struct cvp_cycle_info {
u32 lo_ctrl_lim[HFI_MAX_HW_THREADS];
struct cvp_cycle_stat cycle[HFI_MAX_HW_THREADS];
unsigned long conf_freq;
struct mutex lock;
};
struct cvp_session_prop {
@ -263,6 +262,7 @@ struct cvp_session_event {
struct msm_cvp_core {
struct list_head list;
struct mutex lock;
struct mutex clk_lock;
int id;
dev_t dev_num;
struct cdev cdev;

View File

@ -219,7 +219,7 @@ void *cvp_get_drv_data(struct device *dev)
driver_data = (struct msm_cvp_platform_data *)match->data;
if (!strcmp(match->compatible, "qcom,kona-cvp")) {
if (!strcmp(match->compatible, "qcom,lahaina-cvp")) {
ddr_type = of_fdt_get_ddrtype();
if (ddr_type == -ENOENT) {
dprintk(CVP_ERR,
@ -228,9 +228,11 @@ void *cvp_get_drv_data(struct device *dev)
if (driver_data->ubwc_config &&
(ddr_type == DDR_TYPE_LPDDR4 ||
ddr_type == DDR_TYPE_LPDDR4X ||
ddr_type == DDR_TYPE_LPDDR4Y))
ddr_type == DDR_TYPE_LPDDR4X))
driver_data->ubwc_config->highest_bank_bit = 15;
dprintk(CVP_CORE, "DDR Type 0x%x hbb 0x%x\n",
ddr_type, driver_data->ubwc_config ?
driver_data->ubwc_config->highest_bank_bit : -1);
}
exit:
return driver_data;