Merge tag 'LA.UM.9.14.r1-20700-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4 into android13-5.4-lahaina

"LA.UM.9.14.r1-20700-LAHAINA.QSSI13.0"

* tag 'LA.UM.9.14.r1-20700-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4:
  msm: adsprpc: fix UAF process init_mem
  msm: ipa3: Add multi IDU support for external router mode FR
  cpu-topology: Don't error on more than CONFIG_NR_CPUS CPUs in device tree
  soc: qcom: socinfo: correct the name of softsku_id
  cnss2: Add code to fallback to non-contiguous FW mem allocation
  defconfig: sdxlemur: Enable configs on sdxlemur
  soc: qcom: socinfo: Get SKU ID from kernel command line
  BACKPORT: f2fs: do not set compression bit if kernel doesn't support
  UPSTREAM: f2fs: fix UAF in f2fs_available_free_memory
  ANDROID: f2fs: check nr_pages for readahead
  UPSTREAM: f2fs: guarantee to write dirty data when enabling checkpoint back
  FROMGIT: f2fs: flush data when enabling checkpoint back
  BACKPORT: f2fs: introduce FI_COMPRESS_RELEASED instead of using IMMUTABLE bit
  BACKPORT: f2fs: enforce the immutable flag on open files
  BACKPORT: f2fs: change i_compr_blocks of inode to atomic value
  BACKPORT: f2fs: make file immutable even if releasing zero compression block
  BACKPORT: f2fs: compress: remove unneeded preallocation
  ANDROID: binder: fix pending prio state for early exit
  ANDROID: binder: fix race in priority restore
  ANDROID: binder: switch task argument for binder_thread
  ANDROID: binder: pass desired priority by reference
  ANDROID: binder: fold common setup of node_prio
  BACKPORT: Bluetooth: L2CAP: Fix use-after-free caused by l2cap_chan_put
  FROMLIST: binder: fix UAF of ref->proc caused by race condition

Change-Id: I18e9ed7b3659a920fb9048976d171d3666fe4ed3
Michael Bestas 2022-11-09 19:36:48 +02:00
commit 4220ba2dae
20 changed files with 382 additions and 146 deletions


@ -1 +1 @@
LTS_5.4.197_3970bc62738d
LTS_5.4.197_26eb689452c8


@ -468,3 +468,39 @@ CONFIG_NET_ACT_CT=y
CONFIG_NET_TC_SKB_EXT=y
CONFIG_NET_SCH_FIFO=y
CONFIG_NET_SCHED_ACT_MPLS_QGKI=y
CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set
# CONFIG_BCACHE is not set
CONFIG_BLK_DEV_DM_BUILTIN=y
CONFIG_BLK_DEV_DM=y
# CONFIG_DM_DEBUG is not set
CONFIG_DM_BUFIO=y
# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
# CONFIG_DM_UNSTRIPED is not set
# CONFIG_DM_CRYPT is not set
# CONFIG_DM_SNAPSHOT is not set
# CONFIG_DM_THIN_PROVISIONING is not set
# CONFIG_DM_CACHE is not set
# CONFIG_DM_WRITECACHE is not set
# CONFIG_DM_ERA is not set
# CONFIG_DM_CLONE is not set
# CONFIG_DM_MIRROR is not set
# CONFIG_DM_RAID is not set
# CONFIG_DM_ZERO is not set
# CONFIG_DM_MULTIPATH is not set
# CONFIG_DM_DELAY is not set
# CONFIG_DM_DUST is not set
# CONFIG_DM_INIT is not set
# CONFIG_DM_UEVENT is not set
# CONFIG_DM_FLAKEY is not set
CONFIG_DM_VERITY=y
# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
# CONFIG_DM_VERITY_AVB is not set
# CONFIG_DM_VERITY_FEC is not set
# CONFIG_DM_SWITCH is not set
# CONFIG_DM_LOG_WRITES is not set
# CONFIG_DM_INTEGRITY is not set
# CONFIG_DM_BOW is not set
# CONFIG_DEVMEM is not set
CONFIG_DAX=y
CONFIG_LSM_MMAP_MIN_ADDR=32768


@ -666,20 +666,26 @@ static int to_kernel_prio(int policy, int user_priority)
return MAX_USER_RT_PRIO - 1 - user_priority;
}
static void binder_do_set_priority(struct task_struct *task,
struct binder_priority desired,
static void binder_do_set_priority(struct binder_thread *thread,
const struct binder_priority *desired,
bool verify)
{
struct task_struct *task = thread->task;
int priority; /* user-space prio value */
bool has_cap_nice;
unsigned int policy = desired.sched_policy;
unsigned int policy = desired->sched_policy;
if (task->policy == policy && task->normal_prio == desired.prio)
if (task->policy == policy && task->normal_prio == desired->prio) {
spin_lock(&thread->prio_lock);
if (thread->prio_state == BINDER_PRIO_PENDING)
thread->prio_state = BINDER_PRIO_SET;
spin_unlock(&thread->prio_lock);
return;
}
has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
priority = to_userspace_prio(policy, desired.prio);
priority = to_userspace_prio(policy, desired->prio);
if (verify && is_rt_policy(policy) && !has_cap_nice) {
long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
@ -704,16 +710,30 @@ static void binder_do_set_priority(struct task_struct *task,
}
}
if (policy != desired.sched_policy ||
to_kernel_prio(policy, priority) != desired.prio)
if (policy != desired->sched_policy ||
to_kernel_prio(policy, priority) != desired->prio)
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
"%d: priority %d not allowed, using %d instead\n",
task->pid, desired.prio,
task->pid, desired->prio,
to_kernel_prio(policy, priority));
trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
to_kernel_prio(policy, priority),
desired.prio);
desired->prio);
spin_lock(&thread->prio_lock);
if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
/*
* A new priority has been set by an incoming nested
* transaction. Abort this priority restore and allow
* the transaction to run at the new desired priority.
*/
spin_unlock(&thread->prio_lock);
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
"%d: %s: aborting priority restore\n",
thread->pid, __func__);
return;
}
/* Set the actual priority */
if (task->policy != policy || is_rt_policy(policy)) {
@ -727,37 +747,42 @@ static void binder_do_set_priority(struct task_struct *task,
}
if (is_fair_policy(policy))
set_user_nice(task, priority);
thread->prio_state = BINDER_PRIO_SET;
spin_unlock(&thread->prio_lock);
}
static void binder_set_priority(struct task_struct *task,
struct binder_priority desired)
static void binder_set_priority(struct binder_thread *thread,
const struct binder_priority *desired)
{
binder_do_set_priority(task, desired, /* verify = */ true);
binder_do_set_priority(thread, desired, /* verify = */ true);
}
static void binder_restore_priority(struct task_struct *task,
struct binder_priority desired)
static void binder_restore_priority(struct binder_thread *thread,
const struct binder_priority *desired)
{
binder_do_set_priority(task, desired, /* verify = */ false);
binder_do_set_priority(thread, desired, /* verify = */ false);
}
static void binder_transaction_priority(struct task_struct *task,
static void binder_transaction_priority(struct binder_thread *thread,
struct binder_transaction *t,
struct binder_priority node_prio,
bool inherit_rt)
struct binder_node *node)
{
struct binder_priority desired_prio = t->priority;
struct task_struct *task = thread->task;
struct binder_priority desired = t->priority;
const struct binder_priority node_prio = {
.sched_policy = node->sched_policy,
.prio = node->min_priority,
};
if (t->set_priority_called)
return;
t->set_priority_called = true;
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
desired.prio = NICE_TO_PRIO(0);
desired.sched_policy = SCHED_NORMAL;
}
if (node_prio.prio < t->priority.prio ||
@ -770,10 +795,29 @@ static void binder_transaction_priority(struct task_struct *task,
* SCHED_FIFO, prefer SCHED_FIFO, since it can
* run unbounded, unlike SCHED_RR.
*/
desired_prio = node_prio;
desired = node_prio;
}
binder_set_priority(task, desired_prio);
spin_lock(&thread->prio_lock);
if (thread->prio_state == BINDER_PRIO_PENDING) {
/*
* Task is in the process of changing priorities;
* saving its current values would be incorrect.
* Instead, save the pending priority and signal
* the task to abort the priority restore.
*/
t->saved_priority = thread->prio_next;
thread->prio_state = BINDER_PRIO_ABORT;
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
"%d: saved pending priority %d\n",
current->pid, thread->prio_next.prio);
} else {
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
}
spin_unlock(&thread->prio_lock);
binder_set_priority(thread, &desired);
trace_android_vh_binder_set_priority(t, task);
}
@ -1486,6 +1530,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
if (ret && ref == new_ref) {
/*
* Cleanup the failed reference here as the target
* could now be dead and have already released its
* references by now. Calling on the new reference
* with strong=0 and a tmp_refs will not decrement
* the node. The new_ref gets kfree'd below.
*/
binder_cleanup_ref_olocked(new_ref);
ref = NULL;
}
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
@ -2492,14 +2548,11 @@ static int binder_proc_transaction(struct binder_transaction *t,
struct binder_thread *thread)
{
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
node_prio.prio = node->min_priority;
node_prio.sched_policy = node->sched_policy;
if (oneway) {
BUG_ON(thread);
@ -2527,8 +2580,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
thread = binder_select_thread_ilocked(proc);
if (thread) {
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
binder_transaction_priority(thread, t, node);
binder_enqueue_thread_work_ilocked(thread, &t->work);
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
@ -2615,6 +2667,7 @@ static void binder_transaction(struct binder_proc *proc,
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
bool is_nested = false;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@ -2791,6 +2844,7 @@ static void binder_transaction(struct binder_proc *proc,
atomic_inc(&from->tmp_ref);
target_thread = from;
spin_unlock(&tmp->lock);
is_nested = true;
break;
}
spin_unlock(&tmp->lock);
@ -2855,6 +2909,7 @@ static void binder_transaction(struct binder_proc *proc,
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->is_nested = is_nested;
if (!(t->flags & TF_ONE_WAY) &&
binder_supported_policy(current->policy)) {
/* Inherit supported policies for synchronous transactions */
@ -3192,9 +3247,15 @@ static void binder_transaction(struct binder_proc *proc,
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
target_proc->outstanding_txns++;
binder_inner_proc_unlock(target_proc);
if (in_reply_to->is_nested) {
spin_lock(&thread->prio_lock);
thread->prio_state = BINDER_PRIO_PENDING;
thread->prio_next = in_reply_to->saved_priority;
spin_unlock(&thread->prio_lock);
}
wake_up_interruptible_sync(&target_thread->wait);
trace_android_vh_binder_restore_priority(in_reply_to, current);
binder_restore_priority(current, in_reply_to->saved_priority);
binder_restore_priority(thread, &in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
@ -3308,7 +3369,7 @@ err_invalid_target_handle:
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
trace_android_vh_binder_restore_priority(in_reply_to, current);
binder_restore_priority(current, in_reply_to->saved_priority);
binder_restore_priority(thread, &in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
@ -3979,7 +4040,7 @@ retry:
binder_stop_on_user_error < 2);
}
trace_android_vh_binder_restore_priority(NULL, current);
binder_restore_priority(current, proc->default_priority);
binder_restore_priority(thread, &proc->default_priority);
}
if (non_block) {
@ -4201,14 +4262,10 @@ retry:
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
struct binder_priority node_prio;
trd->target.ptr = target_node->ptr;
trd->cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
binder_transaction_priority(current, t, node_prio,
target_node->inherit_rt);
binder_transaction_priority(thread, t, target_node);
cmd = BR_TRANSACTION;
} else {
trd->target.ptr = 0;
@ -4443,6 +4500,8 @@ static struct binder_thread *binder_get_thread_ilocked(
thread->return_error.cmd = BR_OK;
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
thread->reply_error.cmd = BR_OK;
spin_lock_init(&thread->prio_lock);
thread->prio_state = BINDER_PRIO_SET;
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
return thread;
}
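
For reference, the priority-restore race fixed above comes down to a small state machine guarded by a spinlock: the reply path marks a restore as pending, and a nested incoming transaction can steal the saved value and tell the restore to abort. A minimal sketch of that handshake follows; the names, the plain int priority, and struct prio_box are illustrative stand-ins, not the driver's actual types.

#include <linux/spinlock.h>
#include <linux/types.h>

enum prio_state {
	PRIO_SET,	/* desired priority has been applied */
	PRIO_PENDING,	/* a saved-priority restore is in flight */
	PRIO_ABORT,	/* the pending restore must be abandoned */
};

struct prio_box {
	spinlock_t lock;
	enum prio_state state;
	int next;		/* priority to restore next */
};

/* Reply path: record the restore before waking the waiting thread. */
static void mark_restore_pending(struct prio_box *b, int saved)
{
	spin_lock(&b->lock);
	b->next = saved;
	b->state = PRIO_PENDING;
	spin_unlock(&b->lock);
}

/* Nested transaction: adopt the pending value and abort the restore. */
static bool steal_pending(struct prio_box *b, int *saved)
{
	bool stolen = false;

	spin_lock(&b->lock);
	if (b->state == PRIO_PENDING) {
		*saved = b->next;
		b->state = PRIO_ABORT;
		stolen = true;
	}
	spin_unlock(&b->lock);
	return stolen;
}

/* Restore path: bail out quietly if a nested transaction took over. */
static bool restore_allowed(struct prio_box *b)
{
	bool ok;

	spin_lock(&b->lock);
	ok = (b->state != PRIO_ABORT);
	if (ok)
		b->state = PRIO_SET;
	spin_unlock(&b->lock);
	return ok;
}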


@ -366,6 +366,12 @@ struct binder_priority {
int prio;
};
enum binder_prio_state {
BINDER_PRIO_SET, /* desired priority set */
BINDER_PRIO_PENDING, /* initiated a saved priority restore */
BINDER_PRIO_ABORT, /* abort the pending priority restore */
};
/**
* struct binder_proc - binder process bookkeeping
* @proc_node: element for binder_procs list
@ -526,6 +532,12 @@ static inline const struct cred *binder_get_cred(struct binder_proc *proc)
* when outstanding transactions are cleaned up
* (protected by @proc->inner_lock)
* @task: struct task_struct for this thread
* @prio_lock: protects thread priority fields
* @prio_next: saved priority to be restored next
* (protected by @prio_lock)
* @prio_state: state of the priority restore process as
* defined by enum binder_prio_state
* (protected by @prio_lock)
*
* Bookkeeping structure for binder threads.
*/
@ -546,6 +558,9 @@ struct binder_thread {
atomic_t tmp_ref;
bool is_dead;
struct task_struct *task;
spinlock_t prio_lock;
struct binder_priority prio_next;
enum binder_prio_state prio_state;
};
/**
@ -582,6 +597,7 @@ struct binder_transaction {
struct binder_priority priority;
struct binder_priority saved_priority;
bool set_priority_called;
bool is_nested;
kuid_t sender_euid;
struct list_head fd_fixups;
binder_uintptr_t security_ctx;


@ -278,6 +278,16 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
* This function returns the logical CPU number of the node.
* There are basically three kinds of return values:
* (1) logical CPU number which is > 0.
* (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
* there is no possible logical CPU in the kernel to match. This happens
* when CONFIG_NR_CPUS is configured to be smaller than the number of
* CPU nodes in DT. We need to just ignore this case.
* (3) -1 if the node does not exist in the device tree.
*/
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
@ -291,7 +301,8 @@ static int __init get_cpu_for_node(struct device_node *node)
if (cpu >= 0)
topology_parse_cpu_capacity(cpu_node, cpu);
else
pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
cpu_node, cpumask_pr_args(cpu_possible_mask));
of_node_put(cpu_node);
return cpu;
@ -316,9 +327,8 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else {
pr_err("%pOF: Can't get CPU for thread\n",
t);
} else if (cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for thread\n", t);
of_node_put(t);
return -EINVAL;
}
@ -337,7 +347,7 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf) {
} else if (leaf && cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for leaf core\n", core);
return -EINVAL;
}
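
The comment above spells out the contract: a thread or core node that maps to no possible CPU (-ENODEV, i.e. the DT describes more CPUs than CONFIG_NR_CPUS allows) is now ignored, while a node missing from the DT (-1) is still an error. A tiny caller-side sketch of that distinction; example_cpu_for_node() is a hypothetical stand-in for get_cpu_for_node(), not a real API.

#include <linux/errno.h>
#include <linux/of.h>

static int parse_one_thread(struct device_node *t)
{
	int cpu = example_cpu_for_node(t);	/* assumed helper, not a real API */

	if (cpu >= 0)
		return 0;		/* valid, possible CPU: use it */
	if (cpu == -ENODEV)
		return 0;		/* beyond CONFIG_NR_CPUS: ignore quietly */
	return -EINVAL;			/* node missing from DT: real error */
}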


@ -656,7 +656,7 @@ struct fastrpc_file {
/* Flag to indicate ram dump collection status*/
bool is_ramdump_pend;
/* Flag to indicate dynamic process creation status*/
bool in_process_create;
enum fastrpc_process_create_state dsp_process_state;
};
static struct fastrpc_apps gfa;
@ -3755,13 +3755,13 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl,
} inbuf;
spin_lock(&fl->hlock);
if (fl->in_process_create) {
if (fl->dsp_process_state) {
err = -EALREADY;
ADSPRPC_ERR("Already in create dynamic process\n");
spin_unlock(&fl->hlock);
return err;
}
fl->in_process_create = true;
fl->dsp_process_state = PROCESS_CREATE_IS_INPROGRESS;
spin_unlock(&fl->hlock);
inbuf.pgid = fl->tgid;
inbuf.namelen = strlen(current->comm) + 1;
@ -3916,9 +3916,11 @@ bail:
fastrpc_mmap_free(file, 0);
mutex_unlock(&fl->map_mutex);
}
if (err) {
spin_lock(&fl->hlock);
locked = 1;
if (err) {
fl->dsp_process_state = PROCESS_CREATE_DEFAULT;
if (!IS_ERR_OR_NULL(fl->init_mem)) {
init_mem = fl->init_mem;
fl->init_mem = NULL;
@ -3926,14 +3928,13 @@ bail:
locked = 0;
fastrpc_buf_free(init_mem, 0);
}
} else {
fl->dsp_process_state = PROCESS_CREATE_SUCCESS;
}
if (locked) {
spin_unlock(&fl->hlock);
locked = 0;
}
}
spin_lock(&fl->hlock);
fl->in_process_create = false;
spin_unlock(&fl->hlock);
return err;
}
@ -5355,7 +5356,7 @@ skip_dump_wait:
spin_lock(&fl->apps->hlock);
hlist_del_init(&fl->hn);
fl->is_ramdump_pend = false;
fl->in_process_create = false;
fl->dsp_process_state = PROCESS_CREATE_DEFAULT;
spin_unlock(&fl->apps->hlock);
kfree(fl->debug_buf);
kfree(fl->gidlist.gids);
@ -5773,7 +5774,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
fl->qos_request = 0;
fl->dsp_proc_init = 0;
fl->is_ramdump_pend = false;
fl->in_process_create = false;
fl->dsp_process_state = PROCESS_CREATE_DEFAULT;
init_completion(&fl->work);
fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE;
filp->private_data = fl;
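
The switch from a boolean to dsp_process_state lets the create, error, and close paths agree on three outcomes instead of two. A rough sketch of how such a tri-state flag is typically driven under the lock; the names here are illustrative, not the adsprpc API.

#include <linux/errno.h>
#include <linux/spinlock.h>

enum create_state { CREATE_DEFAULT, CREATE_IN_PROGRESS, CREATE_SUCCESS };

struct proc_ctx {
	spinlock_t lock;
	enum create_state state;
};

static int start_create(struct proc_ctx *p)
{
	spin_lock(&p->lock);
	if (p->state != CREATE_DEFAULT) {
		spin_unlock(&p->lock);
		return -EALREADY;	/* another create already ran or is running */
	}
	p->state = CREATE_IN_PROGRESS;
	spin_unlock(&p->lock);
	return 0;
}

static void finish_create(struct proc_ctx *p, int err)
{
	spin_lock(&p->lock);
	p->state = err ? CREATE_DEFAULT : CREATE_SUCCESS;
	spin_unlock(&p->lock);
}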


@ -513,6 +513,11 @@ enum fastrpc_response_flags {
COMPLETE_SIGNAL = 3
};
enum fastrpc_process_create_state {
PROCESS_CREATE_DEFAULT = 0, /* Process is not created */
PROCESS_CREATE_IS_INPROGRESS = 1, /* Process creation is in progress */
PROCESS_CREATE_SUCCESS = 2, /* Process creation is successful */
};
struct smq_invoke_rspv2 {
uint64_t ctx; /* invoke caller context */
int retval; /* invoke return value */


@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/cma.h>
#include <linux/firmware.h>
@ -3984,15 +3986,27 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
if (!fw_mem[i].va && fw_mem[i].size) {
retry:
fw_mem[i].va =
dma_alloc_attrs(dev, fw_mem[i].size,
&fw_mem[i].pa, GFP_KERNEL,
fw_mem[i].attrs);
if (!fw_mem[i].va) {
if ((fw_mem[i].attrs &
DMA_ATTR_FORCE_CONTIGUOUS)) {
fw_mem[i].attrs &=
~DMA_ATTR_FORCE_CONTIGUOUS;
cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
fw_mem[i].type);
goto retry;
}
cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
fw_mem[i].size, fw_mem[i].type);
BUG();
CNSS_ASSERT(0);
return -ENOMEM;
}
}
}
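
The allocation change above boils down to a single retry: if the contiguous allocation fails, clear DMA_ATTR_FORCE_CONTIGUOUS and try again before asserting. A condensed sketch of the pattern; the function and parameter names are placeholders, not the cnss2 API.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_fw_seg(struct device *dev, size_t size,
			  dma_addr_t *pa, unsigned long *attrs)
{
	void *va;

retry:
	va = dma_alloc_attrs(dev, size, pa, GFP_KERNEL, *attrs);
	if (!va && (*attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		/* Contiguous carve-out exhausted: fall back to scattered pages. */
		*attrs &= ~DMA_ATTR_FORCE_CONTIGUOUS;
		goto retry;
	}
	return va;	/* NULL here means even the fallback failed */
}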
@ -5083,17 +5097,21 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
mhi_dump_sfr(pci_priv->mhi_ctrl);
cnss_pr_dbg("Collect remote heap dump segment\n");
for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
cnss_pci_add_dump_seg(pci_priv, dump_seg,
CNSS_FW_REMOTE_HEAP, j,
fw_mem[i].va, fw_mem[i].pa,
fw_mem[i].size);
dump_seg++;
dump_data->nentries++;
j++;
if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
cnss_pr_dbg("Collect remote heap dump segment\n");
cnss_pci_add_dump_seg(pci_priv, dump_seg,
CNSS_FW_REMOTE_HEAP, j,
fw_mem[i].va,
fw_mem[i].pa,
fw_mem[i].size);
dump_seg++;
dump_data->nentries++;
j++;
} else {
cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
}
}
}
@ -5138,7 +5156,8 @@ void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
}
for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
(fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
cnss_pci_remove_dump_seg(pci_priv, dump_seg,
CNSS_FW_REMOTE_HEAP, j,
fw_mem[i].va, fw_mem[i].pa,


@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/firmware.h>
#include <linux/module.h>
@ -2206,7 +2208,8 @@ static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
if (plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
if (!plat_priv->fw_mem[i].va &&
plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
plat_priv->fw_mem[i].attrs |=
DMA_ATTR_FORCE_CONTIGUOUS;
}


@ -187,6 +187,9 @@ static struct socinfo {
#define SMEM_IMAGE_VERSION_OEM_OFFSET 95
#define SMEM_IMAGE_VERSION_PARTITION_APPS 10
int softsku_idx;
module_param_named(softsku_idx, softsku_idx, int, 0644);
/* Version 2 */
static uint32_t socinfo_get_raw_id(void)
{


@ -809,6 +809,10 @@ static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
ret++;
}
}
f2fs_bug_on(F2FS_I_SB(inode),
!compr && ret != cc->cluster_size &&
!is_inode_flag_set(cc->inode, FI_COMPRESS_RELEASED));
}
fail:
f2fs_put_dnode(&dn);
@ -879,21 +883,16 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct address_space *mapping = cc->inode->i_mapping;
struct page *page;
struct dnode_of_data dn;
sector_t last_block_in_bio;
unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i, ret;
bool prealloc;
retry:
ret = f2fs_cluster_blocks(cc, false);
if (ret <= 0)
return ret;
/* compressed case */
prealloc = (ret < cc->cluster_size);
ret = f2fs_init_compress_ctx(cc);
if (ret)
return ret;
@ -949,25 +948,6 @@ retry:
}
}
if (prealloc) {
__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
for (i = cc->cluster_size - 1; i > 0; i--) {
ret = f2fs_get_block(&dn, start_idx + i);
if (ret) {
i = cc->cluster_size;
break;
}
if (dn.data_blkaddr != NEW_ADDR)
break;
}
__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
}
if (likely(!ret)) {
*fsdata = cc->rpages;
*pagep = cc->rpages[offset_in_cluster(cc, index)];


@ -2363,6 +2363,10 @@ int f2fs_mpage_readpages(struct address_space *mapping,
unsigned max_nr_pages = nr_pages;
int ret = 0;
/* nr_pages can only be 0 via f2fs_merkle_tree_readahead() on old kernels. */
if (!nr_pages)
return 0;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;


@ -762,6 +762,7 @@ enum {
FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
FI_MMAP_FILE, /* indicate file was mmapped */
FI_COMPRESS_RELEASED, /* compressed blocks were released */
FI_MAX, /* max flag, never be used */
};
@ -814,7 +815,7 @@ struct f2fs_inode_info {
struct timespec64 i_disk_time[4];/* inode disk times */
/* for file compress */
u64 i_compr_blocks; /* # of compressed blocks */
atomic_t i_compr_blocks; /* # of compressed blocks */
unsigned char i_compress_algorithm; /* algorithm type */
unsigned char i_log_cluster_size; /* log of cluster size */
unsigned int i_cluster_size; /* cluster size */
@ -2663,6 +2664,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
case FI_PIN_FILE:
case FI_COMPRESS_RELEASED:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@ -2784,6 +2786,8 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
set_bit(FI_PIN_FILE, fi->flags);
if (ri->i_inline & F2FS_COMPRESS_RELEASED)
set_bit(FI_COMPRESS_RELEASED, fi->flags);
}
static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
@ -2804,6 +2808,8 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_EXTRA_ATTR;
if (is_inode_flag_set(inode, FI_PIN_FILE))
ri->i_inline |= F2FS_PIN_FILE;
if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
ri->i_inline |= F2FS_COMPRESS_RELEASED;
}
static inline int f2fs_has_extra_attr(struct inode *inode)
@ -3926,8 +3932,9 @@ static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
#endif
static inline void set_compress_context(struct inode *inode)
static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
F2FS_I(inode)->i_compress_algorithm =
@ -3940,19 +3947,25 @@ static inline void set_compress_context(struct inode *inode)
set_inode_flag(inode, FI_COMPRESSED_FILE);
stat_inc_compr_inode(inode);
f2fs_mark_inode_dirty_sync(inode, true);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
static inline u64 f2fs_disable_compressed_file(struct inode *inode)
static inline u32 f2fs_disable_compressed_file(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
u32 i_compr_blocks;
if (!f2fs_compressed_file(inode))
return 0;
if (S_ISREG(inode->i_mode)) {
if (get_dirty_pages(inode))
return 1;
if (fi->i_compr_blocks)
return fi->i_compr_blocks;
i_compr_blocks = atomic_read(&fi->i_compr_blocks);
if (i_compr_blocks)
return i_compr_blocks;
}
fi->i_flags &= ~F2FS_COMPR_FL;
@ -4070,16 +4083,17 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode,
u64 blocks, bool add)
{
int diff = F2FS_I(inode)->i_cluster_size - blocks;
struct f2fs_inode_info *fi = F2FS_I(inode);
/* don't update i_compr_blocks if saved blocks were released */
if (!add && !F2FS_I(inode)->i_compr_blocks)
if (!add && !atomic_read(&fi->i_compr_blocks))
return;
if (add) {
F2FS_I(inode)->i_compr_blocks += diff;
atomic_add(diff, &fi->i_compr_blocks);
stat_add_compr_blocks(inode, diff);
} else {
F2FS_I(inode)->i_compr_blocks -= diff;
atomic_sub(diff, &fi->i_compr_blocks);
stat_sub_compr_blocks(inode, diff);
}
f2fs_mark_inode_dirty_sync(inode, true);
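
The i_compr_blocks conversion matters because the counter is read and updated from paths that can race with compressed-block release and reserve, so plain u64 arithmetic is presumably no longer safe and becomes atomic_t. A minimal sketch of the counter discipline, with a hypothetical struct standing in for f2fs_inode_info.

#include <linux/atomic.h>
#include <linux/types.h>

struct compr_info {
	atomic_t compr_blocks;	/* # of compressed blocks, updated concurrently */
};

static void compr_blocks_update(struct compr_info *ci, int diff, bool add)
{
	/* don't drop below zero if the saved blocks were already released */
	if (!add && !atomic_read(&ci->compr_blocks))
		return;

	if (add)
		atomic_add(diff, &ci->compr_blocks);
	else
		atomic_sub(diff, &ci->compr_blocks);
}

static unsigned int compr_blocks_read(struct compr_info *ci)
{
	return atomic_read(&ci->compr_blocks);
}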


@ -58,6 +58,12 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
bool need_alloc = true;
int err = 0;
if (unlikely(IS_IMMUTABLE(inode)))
return VM_FAULT_SIGBUS;
if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
return VM_FAULT_SIGBUS;
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto err;
@ -80,10 +86,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
err = ret;
goto err;
} else if (ret) {
if (ret < F2FS_I(inode)->i_cluster_size) {
err = -EAGAIN;
goto err;
}
need_alloc = false;
}
}
@ -258,8 +260,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
};
unsigned int seq_id = 0;
if (unlikely(f2fs_readonly(inode->i_sb) ||
is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
if (unlikely(f2fs_readonly(inode->i_sb)))
return 0;
trace_f2fs_sync_file_enter(inode);
@ -273,7 +274,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
ret = file_write_and_wait_range(file, start, end);
clear_inode_flag(inode, FI_NEED_IPU);
if (ret) {
if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
return ret;
}
@ -561,7 +562,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
bool compressed_cluster = false;
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !F2FS_I(dn->inode)->i_compr_blocks;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
base = get_extra_isize(dn->inode);
@ -878,6 +879,14 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
if (unlikely(IS_APPEND(inode) &&
(attr->ia_valid & (ATTR_MODE | ATTR_UID |
ATTR_GID | ATTR_TIMES_SET))))
return -EPERM;
if ((attr->ia_valid & ATTR_SIZE) &&
!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
@ -1838,8 +1847,8 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
if (iflags & F2FS_COMPR_FL) {
if (!f2fs_may_compress(inode))
return -EINVAL;
set_compress_context(inode);
if (set_compress_context(inode))
return -EOPNOTSUPP;
}
}
if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
@ -3433,7 +3442,7 @@ static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
if (!f2fs_compressed_file(inode))
return -EINVAL;
blocks = F2FS_I(inode)->i_compr_blocks;
blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
return put_user(blocks, (u64 __user *)arg);
}
@ -3523,7 +3532,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
goto out;
}
if (IS_IMMUTABLE(inode)) {
if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto out;
}
@ -3532,14 +3541,13 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
goto out;
if (!F2FS_I(inode)->i_compr_blocks)
goto out;
F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
f2fs_set_inode_flags(inode);
set_inode_flag(inode, FI_COMPRESS_RELEASED);
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
goto out;
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
@ -3585,14 +3593,15 @@ out:
if (ret >= 0) {
ret = put_user(released_blocks, (u64 __user *)arg);
} else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
} else if (released_blocks &&
atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
"iblocks=%llu, released=%u, compr_blocks=%llu, "
"iblocks=%llu, released=%u, compr_blocks=%u, "
"run fsck to fix.",
__func__, inode->i_ino, inode->i_blocks,
released_blocks,
F2FS_I(inode)->i_compr_blocks);
atomic_read(&F2FS_I(inode)->i_compr_blocks));
}
return ret;
@ -3680,14 +3689,14 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
return ret;
if (F2FS_I(inode)->i_compr_blocks)
if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
goto out;
f2fs_balance_fs(F2FS_I_SB(inode), true);
inode_lock(inode);
if (!IS_IMMUTABLE(inode)) {
if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto unlock_inode;
}
@ -3732,8 +3741,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret >= 0) {
F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
f2fs_set_inode_flags(inode);
clear_inode_flag(inode, FI_COMPRESS_RELEASED);
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
@ -3744,14 +3752,15 @@ out:
if (ret >= 0) {
ret = put_user(reserved_blocks, (u64 __user *)arg);
} else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
} else if (reserved_blocks &&
atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
"iblocks=%llu, reserved=%u, compr_blocks=%llu, "
"iblocks=%llu, reserved=%u, compr_blocks=%u, "
"run fsck to fix.",
__func__, inode->i_ino, inode->i_blocks,
reserved_blocks,
F2FS_I(inode)->i_compr_blocks);
atomic_read(&F2FS_I(inode)->i_compr_blocks));
}
return ret;
@ -3890,6 +3899,16 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
}
if (unlikely(IS_IMMUTABLE(inode))) {
ret = -EPERM;
goto unlock;
}
if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EPERM;
goto unlock;
}
ret = generic_write_checks(iocb, from);
if (ret > 0) {
bool preallocated = false;
@ -3954,6 +3973,7 @@ write:
if (ret > 0)
f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
}
unlock:
inode_unlock(inode);
out:
trace_f2fs_file_write_iter(inode, iocb->ki_pos,


@ -443,7 +443,8 @@ static int do_read_inode(struct inode *inode)
(fi->i_flags & F2FS_COMPR_FL)) {
if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
i_log_cluster_size)) {
fi->i_compr_blocks = le64_to_cpu(ri->i_compr_blocks);
atomic_set(&fi->i_compr_blocks,
le64_to_cpu(ri->i_compr_blocks));
fi->i_compress_algorithm = ri->i_compress_algorithm;
fi->i_log_cluster_size = ri->i_log_cluster_size;
fi->i_cluster_size = 1 << fi->i_log_cluster_size;
@ -461,7 +462,7 @@ static int do_read_inode(struct inode *inode)
stat_inc_inline_inode(inode);
stat_inc_inline_dir(inode);
stat_inc_compr_inode(inode);
stat_add_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));
return 0;
}
@ -620,7 +621,8 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
i_log_cluster_size)) {
ri->i_compr_blocks =
cpu_to_le64(F2FS_I(inode)->i_compr_blocks);
cpu_to_le64(atomic_read(
&F2FS_I(inode)->i_compr_blocks));
ri->i_compress_algorithm =
F2FS_I(inode)->i_compress_algorithm;
ri->i_log_cluster_size =
@ -769,7 +771,8 @@ no_delete:
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
stat_dec_compr_inode(inode);
stat_sub_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
stat_sub_compr_blocks(inode,
atomic_read(&F2FS_I(inode)->i_compr_blocks));
if (likely(!f2fs_cp_error(sbi) &&
!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))


@ -1054,6 +1054,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
atomic_set(&fi->i_compr_blocks, 0);
init_rwsem(&fi->i_sem);
spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);
@ -1782,6 +1783,18 @@ restore_flag:
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
int retry = DEFAULT_RETRY_IO_COUNT;
/* we should flush all the data to keep data consistency */
do {
sync_inodes_sb(sbi->sb);
cond_resched();
congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
if (unlikely(retry < 0))
f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
down_write(&sbi->gc_lock);
f2fs_dirty_to_prefree(sbi);
@ -3904,6 +3917,8 @@ free_node_inode:
free_stats:
f2fs_destroy_stats(sbi);
free_nm:
/* stop discard thread before destroying node manager */
f2fs_stop_discard_thread(sbi);
f2fs_destroy_node_manager(sbi);
free_sm:
f2fs_destroy_segment_manager(sbi);


@ -229,6 +229,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
struct f2fs_inode {
__le16 i_mode; /* file mode */


@ -802,6 +802,7 @@ enum {
};
void l2cap_chan_hold(struct l2cap_chan *c);
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
void l2cap_chan_put(struct l2cap_chan *c);
static inline void l2cap_chan_lock(struct l2cap_chan *chan)


@ -219,6 +219,11 @@
#define IPA_CV2X_SUPPORT
/**
* Max number of delegated IDUs for prefix delegation FR
*/
#define IPA_PREFIX_MAPPING_MAX 16
/**
* the attributes of the rule (routing or filtering)
*/
@ -3423,14 +3428,20 @@ enum ipa_ext_router_mode {
* struct ipa_ioc_ext_router_info - provide ext_router info
* @ipa_ext_router_mode: prefix sharing, prefix delegation, or disabled mode
* @pdn_name: PDN interface name
* @ipv6_addr: the prefix addr used for dummy or delegated prefixes
* @ipv6_addr: the prefix addr used for the dummy prefix. (prefix sharing mode)
* @ipv6_mask: the ipv6 mask used to mask above addr to get the correct prefix
* @num_of_idu_prefix_mapping: number of delegated prefix to IDU IP mappings
* @idu_wan_ip: array of IDU WAN IPs to be mapped to delegated prefixes
* @idu_client_prefix: array of delegated prefixes
*/
struct ipa_ioc_ext_router_info {
enum ipa_ext_router_mode mode;
char pdn_name[IPA_RESOURCE_NAME_MAX];
uint32_t ipv6_addr[4];
uint32_t ipv6_mask[4];
int num_of_idu_prefix_mapping;
uint32_t idu_wan_ip[IPA_PREFIX_MAPPING_MAX][4];
uint32_t idu_client_prefix[IPA_PREFIX_MAPPING_MAX][4];
};
/**


@ -110,7 +110,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
}
/* Find channel with given SCID.
* Returns locked channel. */
* Returns a reference locked channel.
*/
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
u16 cid)
{
@ -118,15 +119,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
if (c)
l2cap_chan_lock(c);
if (c) {
/* Only lock if chan reference is not 0 */
c = l2cap_chan_hold_unless_zero(c);
if (c)
l2cap_chan_lock(c);
}
mutex_unlock(&conn->chan_lock);
return c;
}
/* Find channel with given DCID.
* Returns locked channel.
* Returns a reference locked channel.
*/
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
u16 cid)
@ -135,8 +140,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_dcid(conn, cid);
if (c)
l2cap_chan_lock(c);
if (c) {
/* Only lock if chan reference is not 0 */
c = l2cap_chan_hold_unless_zero(c);
if (c)
l2cap_chan_lock(c);
}
mutex_unlock(&conn->chan_lock);
return c;
@ -161,8 +170,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_ident(conn, ident);
if (c)
l2cap_chan_lock(c);
if (c) {
/* Only lock if chan reference is not 0 */
c = l2cap_chan_hold_unless_zero(c);
if (c)
l2cap_chan_lock(c);
}
mutex_unlock(&conn->chan_lock);
return c;
@ -496,6 +509,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
kref_get(&c->kref);
}
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
if (!kref_get_unless_zero(&c->kref))
return NULL;
return c;
}
void l2cap_chan_put(struct l2cap_chan *c)
{
BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
@ -1812,7 +1835,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
src_match = !bacmp(&c->src, src);
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
l2cap_chan_hold(c);
c = l2cap_chan_hold_unless_zero(c);
if (!c)
continue;
read_unlock(&chan_list_lock);
return c;
}
@ -1827,7 +1853,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
}
if (c1)
l2cap_chan_hold(c1);
c1 = l2cap_chan_hold_unless_zero(c1);
read_unlock(&chan_list_lock);
@ -4221,6 +4247,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
unlock:
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return err;
}
@ -4334,6 +4361,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
done:
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return err;
}
@ -5062,6 +5090,7 @@ send_move_response:
l2cap_send_move_chan_rsp(chan, result);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return 0;
}
@ -5154,6 +5183,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
}
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
@ -5183,6 +5213,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
@ -5246,6 +5277,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return 0;
}
@ -5281,6 +5313,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
}
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return 0;
}
@ -5653,12 +5686,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
if (credits > max_credits) {
BT_ERR("LE credits overflow");
l2cap_send_disconn_req(chan, ECONNRESET);
l2cap_chan_unlock(chan);
/* Return 0 so that we don't trigger an unnecessary
* command reject packet.
*/
return 0;
goto unlock;
}
chan->tx_credits += credits;
@ -5669,7 +5701,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
if (chan->tx_credits)
chan->ops->resume(chan);
unlock:
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return 0;
}
@ -6983,6 +7017,7 @@ drop:
done:
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
@ -7386,7 +7421,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
if (src_type != c->src_type)
continue;
l2cap_chan_hold(c);
c = l2cap_chan_hold_unless_zero(c);
read_unlock(&chan_list_lock);
return c;
}
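
All of the l2cap changes above funnel through one idea: a lookup running under a list lock must not hand out a channel whose refcount has already reached zero, so the plain hold is replaced with kref_get_unless_zero() and every lookup gains a matching put. A generic sketch of that lookup pattern, with hypothetical object and list names rather than the real l2cap structures.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct chan_obj {
	struct kref kref;
	u16 cid;
	struct list_head node;
};

static DEFINE_MUTEX(chan_list_lock_example);
static LIST_HEAD(chan_list_example);

/* Take a reference only if the object is not already on its way to free. */
static struct chan_obj *chan_hold_unless_zero(struct chan_obj *c)
{
	if (!c || !kref_get_unless_zero(&c->kref))
		return NULL;
	return c;
}

static struct chan_obj *chan_lookup(u16 cid)
{
	struct chan_obj *c, *found = NULL;

	mutex_lock(&chan_list_lock_example);
	list_for_each_entry(c, &chan_list_example, node) {
		if (c->cid == cid) {
			/* NULL here means the channel is being torn down. */
			found = chan_hold_unless_zero(c);
			break;
		}
	}
	mutex_unlock(&chan_list_lock_example);
	return found;
}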