Merge tag 'LA.UM.9.14.r1-21600-LAHAINA.QSSI14.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4 into android13-5.4-lahaina
"LA.UM.9.14.r1-21600-LAHAINA.QSSI14.0" * tag 'LA.UM.9.14.r1-21600-LAHAINA.QSSI14.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4: msm: kgsl: Keep postamble packets in a privileged buffer msm: kgsl: Check user generated timestamp before queuing drawobjs msm: ipa3: add ioctl interface for dual backhaul dwc3: Add check for sg queued trbs while reclaiming soc: spcom: Addressing KASAN issue slab-out-of-bounds clk: qcom: gcc: Add support for edp ref clock for Yupik clk: qcom: gcc: Add support for EDP Ref clock Yupik msm: Add config option for Realtek R8168 driver msm: synx: Check for zero before reducing bind handles ANDROID: mm/filemap: Fix missing put_page() for speculative page fault BACKPORT: FROMGIT: cgroup: Use separate src/dst nodes when preloading css_sets for migration usb: pd: Send extcon notification as soon as APSD detection is done pci: msm: Flush workqueue and destroy it in mhi controller unregister msm: mhi_dev: Update msi_disable on fetching MSI config mdt_loader: check for overflow before allocating memory ANDROID: Re-enable fast mremap and fix UAF with SPF ANDROID: mm: fix invalid backport in speculative page fault path ANDROID: disable page table moves when speculative page faults are enabled ANDROID: mm: assert that mmap_lock is taken exclusively in vm_write_begin ANDROID: mm: remove sequence counting when mmap_lock is not exclusively owned ANDROID: mm/khugepaged: add missing vm_write_{begin|end} BACKPORT: FROMLIST: mm: implement speculative handling in filemap_fault() ANDROID: mm: prevent reads of unstable pmd during speculation ANDROID: mm: prevent speculative page fault handling for in do_swap_page() ANDROID: mm: skip pte_alloc during speculative page fault i2c-msm-geni: KASAN: use-after-free in __list_add_valid+0x2c/0xc4 Conflicts: drivers/soc/qcom/mdt_loader.c kernel/cgroup/cgroup.c mm/khugepaged.c Change-Id: I762818d33586457eebad3e0bcc28f75f3865d0b6
commit 5b92479b88
@@ -1 +1 @@
-LTS_5.4.226_d72fdcc7094f
+LTS_5.4.226_2af3bdf29330
@@ -1746,6 +1746,8 @@ void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_sfr_info *sfr_info = mhi_cntrl->mhi_sfr;

	destroy_workqueue(mhi_cntrl->wq);

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);
@@ -2173,6 +2173,19 @@ static struct clk_branch gcc_pcie_clkref_en = {
	},
};

static struct clk_branch gcc_edp_clkref_en = {
	.halt_reg = 0x8c008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8c008,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_edp_clkref_en",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_pcie_throttle_core_clk = {
	.halt_reg = 0x90018,
	.halt_check = BRANCH_HALT_SKIP,
@@ -3510,6 +3523,7 @@ static struct clk_regmap *gcc_yupik_clocks[] = {
	[GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
	[GCC_VIDEO_MVP_THROTTLE_CORE_CLK] =
		&gcc_video_mvp_throttle_core_clk.clkr,
	[GCC_EDP_CLKREF_EN] = &gcc_edp_clkref_en.clkr,
};

static const struct qcom_reset_map gcc_yupik_resets[] = {
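Note: once gcc_edp_clkref_en is registered and exposed through the GCC_EDP_CLKREF_EN index, a consumer would normally enable it through the common clk API. A minimal hedged sketch follows; the "edp_refclk" clock-names string and the probe function are assumptions for illustration, not part of this change.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: enable the eDP refclk branch during probe. */
static int example_edp_probe(struct platform_device *pdev)
{
	struct clk *refclk;
	int ret;

	/* "edp_refclk" is an assumed clock-names entry, not from this diff */
	refclk = devm_clk_get(&pdev->dev, "edp_refclk");
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	/* clk_branch2_ops makes this a plain enable/disable branch clock */
	ret = clk_prepare_enable(refclk);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, refclk);
	return 0;
}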
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __ADRENO_H
#define __ADRENO_H
@@ -16,9 +16,6 @@
#include "adreno_ringbuffer.h"
#include "kgsl_sharedmem.h"

/* Index to preemption scratch buffer to store KMD postamble */
#define KMD_POSTAMBLE_IDX 100

/* ADRENO_DEVICE - Given a kgsl_device return the adreno device struct */
#define ADRENO_DEVICE(device) \
		container_of(device, struct adreno_device, dev)
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
@@ -553,8 +553,7 @@ unsigned int a6xx_preemption_pre_ibsubmit(

	/* Add a KMD post amble to clear the perf counters during preemption */
	if (!adreno_dev->perfcounter) {
		u64 kmd_postamble_addr =
			PREEMPT_SCRATCH_ADDR(adreno_dev, KMD_POSTAMBLE_IDX);
		u64 kmd_postamble_addr = SCRATCH_POSTAMBLE_ADDR(KGSL_DEVICE(adreno_dev));

		*cmds++ = cp_type7_packet(CP_SET_AMBLE, 3);
		*cmds++ = lower_32_bits(kmd_postamble_addr);
@@ -695,6 +694,7 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,

int a6xx_preemption_init(struct adreno_device *adreno_dev)
{
	u32 flags = ADRENO_FEATURE(adreno_dev, ADRENO_APRIV) ? KGSL_MEMDESC_PRIVILEGED : 0;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
@@ -717,7 +717,7 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)

	if (IS_ERR_OR_NULL(preempt->scratch)) {
		preempt->scratch = kgsl_allocate_global(device, PAGE_SIZE,
			0, 0, 0, "preempt_scratch");
			0, 0, flags, "preempt_scratch");
		if (IS_ERR(preempt->scratch))
			return PTR_ERR(preempt->scratch);
	}
@@ -733,12 +733,13 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
		return ret;

	/*
	 * First 8 dwords of the preemption scratch buffer is used to store the address for CP
	 * to save/restore VPC data. Reserve 11 dwords in the preemption scratch buffer from
	 * index KMD_POSTAMBLE_IDX for KMD postamble pm4 packets
	 * First 28 dwords of the device scratch buffer are used to store shadow rb data.
	 * Reserve 11 dwords in the device scratch buffer from SCRATCH_POSTAMBLE_OFFSET for
	 * KMD postamble pm4 packets. This should be in *device->scratch* so that userspace
	 * cannot access it.
	 */
	if (!adreno_dev->perfcounter) {
		u32 *postamble = preempt->scratch->hostptr + (KMD_POSTAMBLE_IDX * sizeof(u64));
		u32 *postamble = device->scratch->hostptr + SCRATCH_POSTAMBLE_OFFSET;
		u32 count = 0;

		postamble[count++] = cp_type7_packet(CP_REG_RMW, 3);
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
@@ -903,6 +904,23 @@ int adreno_hwsched_queue_cmds(struct kgsl_device_private *dev_priv,

	user_ts = *timestamp;

	/*
	 * If there is only one drawobj in the array and it is of
	 * type SYNCOBJ_TYPE, skip comparing user_ts as it can be 0
	 */
	if (!(count == 1 && drawobj[0]->type == SYNCOBJ_TYPE) &&
		(drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS)) {
		/*
		 * User specified timestamps need to be greater than the last
		 * issued timestamp in the context
		 */
		if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0) {
			spin_unlock(&drawctxt->lock);
			kmem_cache_free(jobs_cache, job);
			return -ERANGE;
		}
	}

	for (i = 0; i < count; i++) {

		switch (drawobj[i]->type) {
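Note: the check above relies on timestamp_cmp() to decide whether a user-supplied timestamp actually moves the context forward despite 32-bit rollover. A standalone sketch of a wrap-safe comparison in the same spirit is shown below; it is illustrative only, the constant mirrors KGSL_TIMESTAMP_WINDOW from the kgsl.h hunk further down, and the real kgsl helper may differ in detail.

#include <stdint.h>

/* Half of the 32-bit range, as in KGSL_TIMESTAMP_WINDOW */
#define TIMESTAMP_WINDOW 0x80000000u

/*
 * Returns >0 if a is newer than b, 0 if equal, <0 if older, treating a
 * difference larger than half the range as a rollover.
 */
static int timestamp_cmp_sketch(uint32_t a, uint32_t b)
{
	uint32_t delta = a - b;	/* wrapping subtraction keeps ordering */

	if (delta == 0)
		return 0;
	return delta < TIMESTAMP_WINDOW ? 1 : -1;
}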
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __KGSL_H
#define __KGSL_H
@@ -71,6 +72,11 @@
#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
	((dev)->scratch->gpuaddr + SCRATCH_RPTR_OFFSET(id))

/* OFFSET to KMD postamble packets in scratch buffer */
#define SCRATCH_POSTAMBLE_OFFSET (100 * sizeof(u64))
#define SCRATCH_POSTAMBLE_ADDR(dev) \
	((dev)->scratch->gpuaddr + SCRATCH_POSTAMBLE_OFFSET)

/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000
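Note: with these defines the KMD postamble sits 100 * sizeof(u64) = 800 bytes into the privileged device scratch buffer, well past the first 28 dwords used for shadow ringbuffer data per the comment in a6xx_preemption_init(). A small hedged sketch of the address arithmetic, using a made-up gpuaddr value purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirrors SCRATCH_POSTAMBLE_OFFSET: 100 u64 slots into the scratch buffer */
#define SCRATCH_POSTAMBLE_OFFSET (100 * sizeof(uint64_t))

int main(void)
{
	/* Hypothetical GPU address of device->scratch, for illustration only */
	uint64_t scratch_gpuaddr = 0xfc000000ull;

	/* Equivalent of SCRATCH_POSTAMBLE_ADDR(dev) */
	uint64_t postamble_addr = scratch_gpuaddr + SCRATCH_POSTAMBLE_OFFSET;

	printf("postamble at +%zu bytes -> 0x%llx\n",
	       SCRATCH_POSTAMBLE_OFFSET, (unsigned long long)postamble_addr);
	return 0;
}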
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/clk.h>
@@ -136,10 +137,12 @@ struct geni_i2c_dev {
	bool disable_dma_mode;
	bool prev_cancel_pending; //Halt cancel till IOS in good state
	bool is_i2c_rtl_based; /* doing pending cancel only for rtl based SE's */
	atomic_t is_xfer_in_progress; /* Used to maintain xfer inprogress status */
};

static struct geni_i2c_dev *gi2c_dev_dbg[MAX_SE];
static int arr_idx;
static int geni_i2c_runtime_suspend(struct device *dev);

struct geni_i2c_err_log {
	int err;
@@ -1052,11 +1055,13 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
	int i, ret = 0, timeout = 0;

	gi2c->err = 0;
	atomic_set(&gi2c->is_xfer_in_progress, 1);

	/* Client to respect system suspend */
	if (!pm_runtime_enabled(gi2c->dev)) {
		GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
			"%s: System suspended\n", __func__);
		atomic_set(&gi2c->is_xfer_in_progress, 0);
		return -EACCES;
	}

@@ -1068,6 +1073,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
			pm_runtime_put_noidle(gi2c->dev);
			/* Set device in suspended since resume failed */
			pm_runtime_set_suspended(gi2c->dev);
			atomic_set(&gi2c->is_xfer_in_progress, 0);
			return ret;
		}
	}
@@ -1078,12 +1084,13 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
		if (ret) {
			pm_runtime_mark_last_busy(gi2c->dev);
			pm_runtime_put_autosuspend(gi2c->dev);
			atomic_set(&gi2c->is_xfer_in_progress, 0);
			return ret; //Don't perform xfer is cancel failed
		}
	}

	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
		"n:%d addr:0x%x\n", num, msgs[0].addr);
		"n:%d addr:0x%x\n", num, msgs[0].addr);

	gi2c->dbg_num = num;
	kfree(gi2c->dbg_buf_ptr);
@@ -1268,7 +1275,7 @@ geni_i2c_txn_ret:
		pm_runtime_mark_last_busy(gi2c->dev);
		pm_runtime_put_autosuspend(gi2c->dev);
	}

	atomic_set(&gi2c->is_xfer_in_progress, 0);
	gi2c->cur = NULL;
	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
		"i2c txn ret:%d, num:%d, err:%d\n", ret, num, gi2c->err);
@@ -1476,10 +1483,10 @@ static int geni_i2c_probe(struct platform_device *pdev)
		return ret;
	}

	atomic_set(&gi2c->is_xfer_in_progress, 0);
	snprintf(boot_marker, sizeof(boot_marker),
		"M - DRIVER GENI_I2C_%d Ready", gi2c->adap.nr);
		"M - DRIVER GENI_I2C_%d Ready", gi2c->adap.nr);
	place_marker(boot_marker);

	dev_info(gi2c->dev, "I2C probed\n");
	return 0;
}
@@ -1489,6 +1496,33 @@ static int geni_i2c_remove(struct platform_device *pdev)
	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
	int i;

	if (atomic_read(&gi2c->is_xfer_in_progress)) {
		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
			"%s: Xfer is in progress\n", __func__);
		return -EBUSY;
	}

	if (!pm_runtime_status_suspended(gi2c->dev)) {
		if (geni_i2c_runtime_suspend(gi2c->dev))
			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
				"%s: runtime suspend failed\n", __func__);
	}

	if (gi2c->se_mode == GSI_ONLY) {
		if (gi2c->tx_c) {
			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
				"%s: clearing tx dma resource\n", __func__);
			dma_release_channel(gi2c->tx_c);
		}
		if (gi2c->rx_c) {
			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
				"%s: clearing rx dma resource\n", __func__);
			dma_release_channel(gi2c->rx_c);
		}
	}

	pm_runtime_put_noidle(gi2c->dev);
	pm_runtime_set_suspended(gi2c->dev);
	pm_runtime_disable(gi2c->dev);
	i2c_del_adapter(&gi2c->adap);

@@ -1594,6 +1628,19 @@ static int geni_i2c_suspend_late(struct device *device)
	int ret;

	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);

	if (atomic_read(&gi2c->is_xfer_in_progress)) {
		if (!pm_runtime_status_suspended(gi2c->dev)) {
			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
				":%s: runtime PM is active\n", __func__);
			return -EBUSY;
		}
		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
			"%s System suspend not allowed while xfer in progress\n",
			__func__);
		return -EBUSY;
	}

	/* Make sure no transactions are pending */
	ret = i2c_trylock_bus(&gi2c->adap, I2C_LOCK_SEGMENT);
	if (!ret) {
@@ -870,7 +870,8 @@ int synx_bind(struct synx_session session_id,
		mutex_lock(&synx_obj->obj_lock);
		memset(&synx_obj->bound_synxs[bound_idx], 0,
			sizeof(struct synx_external_desc));
		synx_obj->num_bound_synxs--;
		if (synx_obj->num_bound_synxs)
			synx_obj->num_bound_synxs--;
		goto free;
	}

@@ -168,6 +168,16 @@ config R8125
	  To compile this driver as a module, choose M here: the module
	  will be called r8125.

config R8168
	tristate "Realtek R8168 driver"
	depends on PCI
	help
	  This is a 1Gbps ethernet driver for the PCI network cards based on
	  the Realtek RTL8111K chip. If you have one of those, say Y here.

	  To compile this driver as a module, choose M here: the module
	  will be called r8168.

config R8125_IOSS
	tristate "Realtek R8125 IOSS glue driver"
	depends on R8125
@@ -103,6 +103,38 @@ static DECLARE_COMPLETION(write_to_host);
static DECLARE_COMPLETION(transfer_host_to_device);
static DECLARE_COMPLETION(transfer_device_to_host);

/*
 * mhi_dev_get_msi_config () - Fetch the MSI config from
 * PCIe and set the msi_disable flag accordingly
 *
 * @phandle : phandle structure
 * @cfg : PCIe MSI config structure
 */
static int mhi_dev_get_msi_config(struct ep_pcie_hw *phandle,
				struct ep_pcie_msi_config *cfg)
{
	int rc;

	/*
	 * Fetching MSI config to read the MSI capability and setting the
	 * msi_disable flag based on it.
	 */
	rc = ep_pcie_get_msi_config(phandle, cfg);
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
	} else if (!rc) {
		mhi_ctx->msi_disable = false;
	} else {
		mhi_log(MHI_MSG_ERROR,
			"Error retrieving pcie msi logic\n");
		return rc;
	}

	mhi_log(MHI_MSG_VERBOSE, "msi_disable = %d\n", mhi_ctx->msi_disable);
	return 0;
}

/*
 * mhi_dev_ring_cache_completion_cb () - Call back function called
 * by IPA driver when ring element cache is done
@@ -280,16 +312,16 @@ static int mhi_dev_schedule_msi_ipa(struct mhi_dev *mhi, struct event_req *ereq)
	union mhi_dev_ring_ctx *ctx;
	int rc;

	rc = ep_pcie_get_msi_config(mhi->phandle, &cfg);
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
		return 0;
	} else if (rc) {
	rc = mhi_dev_get_msi_config(mhi->phandle, &cfg);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "Error retrieving pcie msi logic\n");
		return rc;
	}

	/* If MSI is disabled, bailing out */
	if (mhi_ctx->msi_disable)
		return 0;

	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[ereq->event_ring];

	msi_addr.size = sizeof(uint32_t);
@@ -436,17 +468,16 @@ static int mhi_trigger_msi_edma(struct mhi_dev_ring *ring, u32 idx)
	unsigned long flags;

	if (!mhi_ctx->msi_lower) {
		rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
		if (rc == -EOPNOTSUPP) {
			mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
			mhi_ctx->msi_disable = true;
			return 0;
		} else if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"Error retrieving pcie msi logic\n");
		rc = mhi_dev_get_msi_config(mhi_ctx->phandle, &cfg);
		if (rc) {
			mhi_log(MHI_MSG_ERROR, "Error retrieving pcie msi logic\n");
			return rc;
		}

		/* If MSI is disabled, bailing out */
		if (mhi_ctx->msi_disable)
			return 0;

		mhi_ctx->msi_data = cfg.data;
		mhi_ctx->msi_lower = cfg.lower;
	}
@@ -1450,13 +1481,9 @@ static int mhi_hwc_init(struct mhi_dev *mhi)
	}

	/* Call IPA HW_ACC Init with MSI Address and db routing info */
	rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
	} else if (rc) {
		mhi_log(MHI_MSG_ERROR,
			"Error retrieving pcie msi logic\n");
	rc = mhi_dev_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "Error retrieving pcie msi logic\n");
		return rc;
	}

@@ -1671,11 +1698,8 @@ int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
	struct ep_pcie_msi_config cfg;
	struct mhi_addr transfer_addr;

	rc = ep_pcie_get_msi_config(mhi->phandle, &cfg);
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
	} else if (rc) {
	rc = mhi_dev_get_msi_config(mhi->phandle, &cfg);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "Error retrieving pcie msi logic\n");
		return rc;
	}
@@ -4398,24 +4422,12 @@ static int mhi_dev_resume_mmio_mhi_init(struct mhi_dev *mhi_ctx)
		return -EINVAL;
	}

	/*
	 * Fetching MSI config to read the MSI capability and setting the
	 * msi_disable flag based on it.
	 */
	rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
	} else if (!rc) {
		mhi_ctx->msi_disable = false;
	} else {
		mhi_log(MHI_MSG_ERROR,
			"Error retrieving pcie msi logic\n");
	rc = mhi_dev_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "Error retrieving pcie msi logic\n");
		return rc;
	}

	mhi_log(MHI_MSG_VERBOSE, "msi_disable = %d\n", mhi_ctx->msi_disable);

	rc = mhi_dev_recover(mhi_ctx);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "get mhi state failed\n");
@@ -108,7 +108,7 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
	hash_size = phdrs[1].p_filesz;

	/* Overflow check */
	if (ehdr_size > SIZE_MAX - hash_size)
	if (ehdr_size > SIZE_MAX - hash_size)
		return ERR_PTR(-ENOMEM);

	data = kmalloc(ehdr_size + hash_size, GFP_KERNEL);
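Note: the guard above is the standard pattern for rejecting a size_t sum that would wrap before it reaches kmalloc(). A minimal kernel-style sketch of the same idea follows; the helper name and parameters are hypothetical and only illustrate the check-before-add pattern used in this hunk.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical helper showing the overflow-check-then-allocate pattern */
static void *alloc_two_regions(size_t ehdr_size, size_t hash_size)
{
	/* ehdr_size + hash_size would wrap past SIZE_MAX: refuse to allocate */
	if (ehdr_size > SIZE_MAX - hash_size)
		return ERR_PTR(-ENOMEM);

	return kmalloc(ehdr_size + hash_size, GFP_KERNEL);
}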
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

/*
@@ -631,8 +632,12 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
	mutex_lock(&spcom_dev->chdev_count_lock);
	ret = spcom_create_channel_chardev(cmd->ch_name, cmd->is_sharable);
	mutex_unlock(&spcom_dev->chdev_count_lock);
	if (ret)
		spcom_pr_err("failed to create ch[%s], ret [%d]\n", cmd->ch_name, ret);
	if (ret) {
		if (-EINVAL == ret)
			spcom_pr_err("failed to create channel, ret [%d]\n", ret);
		else
			spcom_pr_err("failed to create ch[%s], ret [%d]\n", cmd->ch_name, ret);
	}

	return ret;
}
@@ -3277,7 +3277,13 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
		 * processed by the core. Hence do not reclaim it until
		 * it is processed by the core.
		 */
		if (req->trb->ctrl & DWC3_TRB_CTRL_HWO) {
		/*
		 * If sg transfer are in progress, avoid checking
		 * HWO bit here as these will get cleared during
		 * ep reclaim.
		 */
		if ((req->trb->ctrl & DWC3_TRB_CTRL_HWO)
				&& (req->num_queued_sgs == 0)) {
			dbg_event(0xFF, "PEND TRB", dep->number);
			return 1;
		}
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
@@ -3724,6 +3725,7 @@ static void psy_changed_notifier_work(struct work_struct *w)
	union power_supply_propval val;
	enum power_supply_typec_mode typec_mode;
	int ret;
	int usb_extcon_state;

	ret = usbpd_get_psy_iio_property(pd,
			POWER_SUPPLY_PROP_TYPEC_MODE, &val);
@@ -3794,8 +3796,28 @@ static void psy_changed_notifier_work(struct work_struct *w)
		return;
	}

	if (pd->typec_mode == typec_mode)
	if (pd->typec_mode == typec_mode) {
		if (!((pd->current_dr == DR_NONE) || (pd->current_dr == DR_UFP)))
			return;

		usb_extcon_state = extcon_get_state(pd->extcon, EXTCON_USB);

		if (usb_extcon_state == 0) {
			ret = usbpd_get_psy_iio_property(pd, POWER_SUPPLY_PROP_REAL_TYPE,
					&val);
			if (ret) {
				usbpd_err(&pd->dev, "Unable to read USB PROP_REAL_TYPE: %d\n",
						ret);
				return;
			}

			if (val.intval == POWER_SUPPLY_TYPE_USB ||
				val.intval == POWER_SUPPLY_TYPE_USB_CDP ||
				val.intval == QTI_POWER_SUPPLY_TYPE_USB_FLOAT)
				queue_work(pd->wq, &pd->start_periph_work);
		}
		return;
	}

	pd->typec_mode = typec_mode;

@@ -177,6 +177,7 @@
#define GCC_AGGRE_NOC_PCIE_CENTER_SF_AXI_CLK	167
#define GCC_AGGRE_NOC_PCIE_TBU_CLK	168
#define GCC_PCIE_CLKREF_EN	169
#define GCC_EDP_CLKREF_EN	170

/* GCC power domains */
#define GCC_PCIE_0_GDSC	0
@@ -1542,6 +1542,12 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
static inline void vm_write_begin(struct vm_area_struct *vma)
{
	/*
	 * Isolated vma might be freed without exclusive mmap_lock but
	 * speculative page fault handler still needs to know it was changed.
	 */
	if (!RB_EMPTY_NODE(&vma->vm_rb))
		WARN_ON_ONCE(!rwsem_is_locked(&(vma->vm_mm)->mmap_sem));
	/*
	 * The reads never spins and preemption
	 * disablement is not required.
@@ -37,6 +37,7 @@
#define WAN_IOCTL_RMV_OFFLOAD_CONNECTION	19
#define WAN_IOCTL_GET_WAN_MTU	20
#define WAN_IOCTL_NOTIFY_NAT_MOVE_RES	21
#define WAN_IOCTL_NOTIFY_DUAL_BACKHAUL_INFO	22

/* User space may not have this defined. */
#ifndef IFNAMSIZ
@@ -195,6 +196,10 @@ struct wan_ioctl_query_per_client_stats {
		WAN_IOCTL_ADD_FLT_RULE, \
		struct ipa_install_fltr_rule_req_msg_v01 *)

#define WAN_IOC_NOTIFY_DUAL_BACKHAUL_INFO _IOWR(WAN_IOC_MAGIC, \
		WAN_IOCTL_NOTIFY_DUAL_BACKHAUL_INFO, \
		struct ipa_eth_backhaul_info_req_msg_v01 *)

#define WAN_IOC_ADD_FLT_RULE_INDEX _IOWR(WAN_IOC_MAGIC, \
		WAN_IOCTL_ADD_FLT_INDEX, \
		struct ipa_fltr_installed_notif_req_msg_v01 *)
@@ -3044,8 +3044,8 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgroup_subsys_state *d_css;
	struct cgroup *dsct;
	struct ext_css_set *ext_src_set;
	bool has_tasks;
	struct ext_css_set *ext_src_set;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

mm/filemap.c
@@ -2495,7 +2495,9 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry (except FAULT_FLAG_SPECULATIVE).
 * If FAULT_FLAG_SPECULATIVE is set, this function runs with elevated vma
 * refcount and with mmap lock not held.
 * Otherwise, vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem
 * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
@@ -2520,6 +2522,52 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
	struct page *page;
	vm_fault_t ret = 0;

	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
		page = find_get_page(mapping, offset);
		if (unlikely(!page))
			return VM_FAULT_RETRY;

		if (unlikely(PageReadahead(page)))
			goto page_put;

		if (!trylock_page(page))
			goto page_put;

		if (unlikely(compound_head(page)->mapping != mapping))
			goto page_unlock;
		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
		if (unlikely(!PageUptodate(page)))
			goto page_unlock;

		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		if (unlikely(offset >= max_off))
			goto page_unlock;

		/*
		 * Update readahead mmap_miss statistic.
		 *
		 * Note that we are not sure if finish_fault() will
		 * manage to complete the transaction. If it fails,
		 * we'll come back to filemap_fault() non-speculative
		 * case which will update mmap_miss a second time.
		 * This is not ideal, we would prefer to guarantee the
		 * update will happen exactly once.
		 */
		if (!(vmf->vma->vm_flags & VM_RAND_READ) && ra->ra_pages) {
			unsigned int mmap_miss = READ_ONCE(ra->mmap_miss);
			if (mmap_miss)
				WRITE_ONCE(ra->mmap_miss, --mmap_miss);
		}

		vmf->page = page;
		return VM_FAULT_LOCKED;
page_unlock:
		unlock_page(page);
page_put:
		put_page(page);
		return VM_FAULT_RETRY;
	}

	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off))
		return VM_FAULT_SIGBUS;

@@ -1345,6 +1345,8 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
	if (!pmd)
		goto drop_hpage;

	vm_write_begin(vma);

	/*
	 * We need to lock the mapping so that from here on, only GUP-fast and
	 * hardware page walks can access the parts of the page tables that
@@ -1412,6 +1414,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
				haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	vm_write_end(vma);
	mm_dec_nr_ptes(mm);
	tlb_remove_table_sync_one();
	mmu_notifier_invalidate_range_end(&range);
@@ -1428,6 +1431,7 @@ drop_hpage:

abort:
	pte_unmap_unlock(start_pte, ptl);
	vm_write_end(vma);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	goto drop_hpage;
}
@@ -1508,8 +1512,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
						NULL, mm, addr,
						addr + HPAGE_PMD_SIZE);
			mmu_notifier_invalidate_range_start(&range);
			vm_write_begin(vma);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			vm_write_end(vma);
			mm_dec_nr_ptes(mm);
			tlb_remove_table_sync_one();
			pte_free(mm, pmd_pgtable(_pmd));

@@ -500,11 +500,9 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
		.target_task = task,
	};

	vm_write_begin(vma);
	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
	vm_write_end(vma);
}

static long madvise_cold(struct task_struct *task,
@@ -538,11 +536,9 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
		.target_task = task,
	};

	vm_write_begin(vma);
	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
	vm_write_end(vma);
}

static inline bool can_do_pageout(struct vm_area_struct *vma)
@@ -745,12 +741,10 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	vm_write_begin(vma);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	vm_write_end(vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

mm/memory.c
@@ -1292,7 +1292,6 @@ void unmap_page_range(struct mmu_gather *tlb,
	unsigned long next;

	BUG_ON(addr >= end);
	vm_write_begin(vma);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
@@ -1302,7 +1301,6 @@ void unmap_page_range(struct mmu_gather *tlb,
			next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
	vm_write_end(vma);
}


@@ -3050,6 +3048,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
	int exclusive = 0;
	vm_fault_t ret;

	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
		pte_unmap(vmf->pte);
		return VM_FAULT_RETRY;
	}

	ret = pte_unmap_same(vmf);
	if (ret) {
		/*
@@ -3296,6 +3299,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
	if (vmf->vma_flags & VM_SHARED)
		return VM_FAULT_SIGBUS;

	/* Do not check unstable pmd, if it's changed will retry later */
	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
		goto skip_pmd_checks;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
@@ -3313,6 +3320,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
	if (unlikely(pmd_trans_unstable(vmf->pmd)))
		return 0;

skip_pmd_checks:
	/* Use the zero-page for reads */
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm)) {
@@ -3417,6 +3425,10 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret;

	/* Do not check unstable pmd, if it's changed will retry later */
	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
		goto skip_pmd_checks;

	/*
	 * Preallocate pte before we take page_lock because this might lead to
	 * deadlocks for memcg reclaim which waits for pages under writeback:
@@ -3439,6 +3451,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
		smp_wmb(); /* See comment in __pte_alloc() */
	}

skip_pmd_checks:
	ret = vma->vm_ops->fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
			    VM_FAULT_DONE_COW)))
@@ -3812,7 +3825,8 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
			start_pgoff + nr_pages - 1);

	if (pmd_none(*vmf->pmd)) {
	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
			pmd_none(*vmf->pmd)) {
		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
		if (!vmf->prealloc_pte)
			goto out;
@@ -4179,16 +4193,11 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
	pte_t entry;
	vm_fault_t ret = 0;

	/* Do not check unstable pmd, if it's changed will retry later */
	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
		goto skip_pmd_checks;

	if (unlikely(pmd_none(*vmf->pmd))) {
		/*
		 * In the case of the speculative page fault handler we abort
		 * the speculative path immediately as the pmd is probably
		 * in the way to be converted in a huge one. We will try
		 * again holding the mmap_sem (which implies that the collapse
		 * operation is done).
		 */
		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
			return VM_FAULT_RETRY;
		/*
		 * Leave __pte_alloc() until later: because vm_ops->fault may
		 * want to allocate huge page, and if we expose page table
@@ -4196,8 +4205,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
		 * concurrent faults and from rmap lookups.
		 */
		vmf->pte = NULL;
	} else if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
		/* See comment in pte_alloc_one_map() */
	} else {
		if (pmd_devmap_trans_unstable(vmf->pmd))
			return 0;
		/*
@@ -4227,6 +4235,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
		}
	}

skip_pmd_checks:
	if (!vmf->pte) {
		if (vma_is_anonymous(vmf->vma))
			return do_anonymous_page(vmf);
@@ -4465,9 +4474,8 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
	pol = __get_vma_policy(vmf.vma, address);
	if (!pol)
		pol = get_task_policy(current);
	if (!pol)
	if (pol && pol->mode == MPOL_INTERLEAVE)
		return VM_FAULT_RETRY;
	if (pol && pol->mode == MPOL_INTERLEAVE)
		return VM_FAULT_RETRY;
#endif

	/*
@@ -600,11 +600,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
{
	int nr_updated;

	vm_write_begin(vma);
	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
	vm_write_end(vma);

	return nr_updated;
}
mm/mmap.c
@@ -2344,8 +2344,22 @@ struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)

	read_lock(&mm->mm_rb_lock);
	vma = __find_vma(mm, addr);
	if (vma)
		atomic_inc(&vma->vm_ref_count);

	/*
	 * If there is a concurrent fast mremap, bail out since the entire
	 * PMD/PUD subtree may have been remapped.
	 *
	 * This is usually safe for conventional mremap since it takes the
	 * PTE locks as does SPF. However fast mremap only takes the lock
	 * at the PMD/PUD level which is ok as it is done with the mmap
	 * write lock held. But since SPF, as the term implies forgoes,
	 * taking the mmap read lock and also cannot take PTL lock at the
	 * larger PMD/PUD granualrity, since it would introduce huge
	 * contention in the page fault path; fall back to regular fault
	 * handling.
	 */
	if (vma && !atomic_inc_unless_negative(&vma->vm_ref_count))
		vma = NULL;
	read_unlock(&mm->mm_rb_lock);

	return vma;
mm/mremap.c
@@ -191,6 +191,38 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		drop_rmap_locks(vma);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	/*
	 * If we have the only reference, swap the refcount to -1. This
	 * will prevent other concurrent references by get_vma() for SPFs.
	 */
	return atomic_cmpxchg(&vma->vm_ref_count, 1, -1) == 1;
}

/*
 * Restore the VMA reference count to 1 after a fast mremap.
 */
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
	/*
	 * This should only be called after a corresponding,
	 * successful trylock_vma_ref_count().
	 */
	VM_BUG_ON_VMA(atomic_cmpxchg(&vma->vm_ref_count, -1, 1) != -1,
		      vma);
}
#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	return true;
}
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
@@ -211,6 +243,14 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
	if (WARN_ON(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We hold both exclusive mmap_lock and rmap_lock at this point and
	 * cannot block. If we cannot immediately take exclusive ownership
	 * of the VMA fallback to the move_ptes().
	 */
	if (!trylock_vma_ref_count(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
@@ -233,6 +273,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
	spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	unlock_vma_ref_count(vma);
	return true;
}
#endif
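Note: taken together with get_vma() in mm/mmap.c above, these helpers form a small ownership protocol on vma->vm_ref_count: fast mremap claims exclusive ownership by swapping the count from 1 to -1, while speculative faults only take a reference when the count is non-negative. The sketch below is a standalone userspace model of that handshake, for illustration only; it uses C11 atomics rather than the kernel atomic API.

#include <stdatomic.h>
#include <stdbool.h>

/* Fast-mremap side: succeed only if we hold the sole reference (count == 1) */
static bool trylock_ref_exclusive(atomic_int *ref)
{
	int expected = 1;

	/* Swap 1 -> -1, mirroring trylock_vma_ref_count() */
	return atomic_compare_exchange_strong(ref, &expected, -1);
}

/* SPF side: mirrors atomic_inc_unless_negative() in get_vma() */
static bool get_ref_for_spf(atomic_int *ref)
{
	int old = atomic_load(ref);

	do {
		if (old < 0)
			return false;	/* fast mremap in progress: fall back */
	} while (!atomic_compare_exchange_weak(ref, &old, old + 1));

	return true;
}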