Merge 5.4.233 into android11-5.4-lts

Changes in 5.4.233
	dma-mapping: add generic helpers for mapping sgtable objects
	scatterlist: add generic wrappers for iterating over sgtable objects
	drm: etnaviv: fix common struct sg_table related issues
	drm/etnaviv: don't truncate physical page address
	wifi: rtl8xxxu: gen2: Turn on the rate control
	powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G
	random: always mix cycle counter in add_latent_entropy()
	KVM: x86: Fail emulation during EMULTYPE_SKIP on any exception
	KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
	can: kvaser_usb: hydra: help gcc-13 to figure out cmd_len
	powerpc: dts: t208x: Disable 10G on MAC1 and MAC2
	alarmtimer: Prevent starvation by small intervals and SIG_IGN
	drm/i915/gvt: fix double free bug in split_2MB_gtt_entry
	mac80211: mesh: embedd mesh_paths and mpp_paths into ieee80211_if_mesh
	uaccess: Add speculation barrier to copy_from_user()
	wifi: mwifiex: Add missing compatible string for SD8787
	ext4: Fix function prototype mismatch for ext4_feat_ktype
	Revert "net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo child qdiscs"
	bpf: add missing header file include
	Linux 5.4.233

Change-Id: I83441dec5bd3c2ad6af19017fe9b467f7ffa2ea5
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman, 2023-02-25 15:52:40 +00:00
commit 6bb176e034
25 changed files with 410 additions and 144 deletions

Makefile

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 232
SUBLEVEL = 233
EXTRAVERSION =
NAME = Kleptomaniac Octopus

arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi

@ -0,0 +1,44 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ]
*
* Copyright 2022 Sean Anderson <sean.anderson@seco.com>
* Copyright 2012 - 2015 Freescale Semiconductor Inc.
*/
fman@400000 {
fman0_rx_0x08: port@88000 {
cell-index = <0x8>;
compatible = "fsl,fman-v3-port-rx";
reg = <0x88000 0x1000>;
fsl,fman-10g-port;
};
fman0_tx_0x28: port@a8000 {
cell-index = <0x28>;
compatible = "fsl,fman-v3-port-tx";
reg = <0xa8000 0x1000>;
fsl,fman-10g-port;
};
ethernet@e0000 {
cell-index = <0>;
compatible = "fsl,fman-memac";
reg = <0xe0000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy0>;
};
mdio@e1000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;
};
};
};

arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi

@ -0,0 +1,44 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ]
*
* Copyright 2022 Sean Anderson <sean.anderson@seco.com>
* Copyright 2012 - 2015 Freescale Semiconductor Inc.
*/
fman@400000 {
fman0_rx_0x09: port@89000 {
cell-index = <0x9>;
compatible = "fsl,fman-v3-port-rx";
reg = <0x89000 0x1000>;
fsl,fman-10g-port;
};
fman0_tx_0x29: port@a9000 {
cell-index = <0x29>;
compatible = "fsl,fman-v3-port-tx";
reg = <0xa9000 0x1000>;
fsl,fman-10g-port;
};
ethernet@e2000 {
cell-index = <1>;
compatible = "fsl,fman-memac";
reg = <0xe2000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy1>;
};
mdio@e3000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;
};
};
};

arch/powerpc/boot/dts/fsl/t2081si-post.dtsi

@ -609,8 +609,8 @@
/include/ "qoriq-bman1.dtsi"
/include/ "qoriq-fman3-0.dtsi"
/include/ "qoriq-fman3-0-1g-0.dtsi"
/include/ "qoriq-fman3-0-1g-1.dtsi"
/include/ "qoriq-fman3-0-10g-2.dtsi"
/include/ "qoriq-fman3-0-10g-3.dtsi"
/include/ "qoriq-fman3-0-1g-2.dtsi"
/include/ "qoriq-fman3-0-1g-3.dtsi"
/include/ "qoriq-fman3-0-1g-4.dtsi"
@ -659,3 +659,19 @@
interrupts = <16 2 1 9>;
};
};
&fman0_rx_0x08 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_tx_0x28 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_rx_0x09 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_tx_0x29 {
/delete-property/ fsl,fman-10g-port;
};

arch/x86/kvm/vmx/nested.c

@ -4118,6 +4118,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
/*
* If IBRS is advertised to the vCPU, KVM must flush the indirect
* branch predictors when transitioning from L2 to L1, as L1 expects
* hardware (KVM in this case) to provide separate predictor modes.
* Bare metal isolates VMX root (host) from VMX non-root (guest), but
* doesn't isolate different VMCSs, i.e. in this case, doesn't provide
* separate modes for L2 vs L1.
*/
if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
indirect_branch_prediction_barrier();
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

arch/x86/kvm/vmx/vmx.c

@ -1397,8 +1397,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
/*
* No indirect branch prediction barrier needed when switching
* the active VMCS within a guest, e.g. on nested VM-Enter.
* The L1 VMM can protect itself with retpolines, IBPB or IBRS.
* the active VMCS within a vCPU, unless IBRS is advertised to
* the vCPU. To minimize the number of IBPBs executed, KVM
* performs IBPB on nested VM-Exit (a single nested transition
* may switch the active VMCS multiple times).
*/
if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
indirect_branch_prediction_barrier();

arch/x86/kvm/x86.c

@ -6787,7 +6787,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
write_fault_to_spt,
emulation_type))
return 1;
if (ctxt->have_exception) {
if (ctxt->have_exception &&
!(emulation_type & EMULTYPE_SKIP)) {
/*
* #UD should result in just EMULATION_FAILED, and trap-like
* exception should not be encountered during decode.

drivers/gpu/drm/etnaviv/etnaviv_gem.c

@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
* because display controller, GPU, etc. are not coherent.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
* discard those writes.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
/* called with etnaviv_obj->lock held */
@ -403,9 +403,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
etnaviv_op_to_dma_dir(op));
dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(op));
etnaviv_obj->last_cpu_prep_op = op;
}
@ -420,8 +419,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
/* fini without a prep is almost certainly a userspace error */
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
etnaviv_obj->last_cpu_prep_op = 0;
}
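Context for this conversion: dma_map_sg() may coalesce entries, after which sgt->nents (the number of DMA segments) no longer equals sgt->orig_nents (the number of CPU segments), and dma_sync_sg_*() must be passed the original count that was given to dma_map_sg(). The open-coded calls above used sgt->nents throughout, which is exactly the class of bug the sgtable helpers close. A hedged sketch of the distinction (function name hypothetical):

	#include <linux/dma-mapping.h>

	/* Hypothetical illustration: after mapping, the two counts may differ. */
	static int demo_sync(struct device *dev, struct sg_table *sgt)
	{
		int ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);

		if (ret)
			return ret;

		/*
		 * Pre-patch bug shape:
		 *   dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
		 * dma_sync_sg_*() expects the count handed to dma_map_sg(), i.e.
		 * orig_nents; the wrapper always passes the right one.
		 */
		dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);

		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return 0;
	}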

drivers/gpu/drm/etnaviv/etnaviv_mmu.c

@ -73,17 +73,17 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
unsigned int i;
int ret;
if (!context || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
u32 pa = sg_dma_address(sg) - sg->offset;
for_each_sgtable_dma_sg(sgt, sg, i) {
phys_addr_t pa = sg_dma_address(sg) - sg->offset;
size_t bytes = sg_dma_len(sg) + sg->offset;
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
return 0;
fail:
da = iova;
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg_dma_len(sg) + sg->offset;
etnaviv_context_unmap(context, da, bytes);
da += bytes;
}
etnaviv_context_unmap(context, iova, da - iova);
return ret;
}
@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
unsigned int da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
for_each_sgtable_dma_sg(sgt, sg, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
etnaviv_context_unmap(context, da, bytes);

drivers/gpu/drm/i915/gvt/gtt.c

@ -1186,10 +1186,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
start_gfn + sub_index, PAGE_SIZE, &dma_addr);
if (ret) {
ppgtt_invalidate_spt(spt);
return ret;
}
if (ret)
goto err;
sub_se.val64 = se->val64;
/* Copy the PAT field from PDE. */
@ -1208,6 +1206,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_pfn(se, sub_spt->shadow_page.mfn);
ppgtt_set_shadow_entry(spt, se, index);
return 0;
err:
/* Cancel the existing address mappings of DMA addr. */
for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(sub_spt, &sub_se);
}
/* Release the new allocated spt. */
trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
ppgtt_free_spt(sub_spt);
return ret;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,

drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c

@ -518,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
u8 cmd_no, int channel)
{
struct kvaser_cmd *cmd;
size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@ -525,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
if (channel < 0) {
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@ -541,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@ -557,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
{
struct kvaser_cmd *cmd;
struct kvaser_usb *dev = priv->dev;
size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
@ -564,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd_async(priv, cmd,
kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
if (err)
kfree(cmd);
@ -715,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
struct kvaser_cmd *cmd;
size_t cmd_len;
u32 value = 0;
u32 mask = 0;
u16 cap_cmd_res;
@ -726,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@ -1555,6 +1560,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev;
struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
struct kvaser_cmd *cmd;
size_t cmd_len;
int err;
if (!hydra)
@ -1565,6 +1571,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
@ -1574,7 +1581,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
reinit_completion(&priv->get_busparams_comp);
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
return err;
@ -1601,6 +1608,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@ -1608,6 +1616,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
sizeof(cmd->set_busparams_req.busparams_nominal));
@ -1616,7 +1625,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
@ -1629,6 +1638,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@ -1636,6 +1646,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
memcpy(&cmd->set_busparams_req.busparams_data, busparams,
sizeof(cmd->set_busparams_req.busparams_data));
@ -1653,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
@ -1781,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
{
struct kvaser_cmd *cmd;
size_t cmd_len;
int err;
u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
@ -1790,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->sw_detail_req.use_ext_cmd = 1;
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@ -1797,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@ -1913,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
size_t cmd_len;
int err;
if ((priv->can.ctrlmode &
@ -1928,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
@ -1937,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
else
cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
return err;

drivers/net/wireless/marvell/mwifiex/sdio.c

@ -58,6 +58,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
};
static const struct of_device_id mwifiex_sdio_of_match_table[] = {
{ .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
{ .compatible = "marvell,sd8997" },
{ }

drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c

@ -4372,12 +4372,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect)
{
#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
/*
* Barry Day reports this causes issues with 8192eu and 8723bu
* devices reconnecting. The reason for this is unclear, but
* until it is better understood, leave the code in place but
* disabled, so it is not lost.
* The firmware turns on the rate control when it knows it's
* connected to a network.
*/
struct h2c_cmd h2c;
@ -4390,7 +4387,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
h2c.media_status_rpt.parm &= ~BIT(0);
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
#endif
}
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)

fs/ext4/sysfs.c

@ -391,6 +391,11 @@ static void ext4_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
static void ext4_feat_release(struct kobject *kobj)
{
kfree(kobj);
}
static const struct sysfs_ops ext4_attr_ops = {
.show = ext4_attr_show,
.store = ext4_attr_store,
@ -405,7 +410,7 @@ static struct kobj_type ext4_sb_ktype = {
static struct kobj_type ext4_feat_ktype = {
.default_groups = ext4_feat_groups,
.sysfs_ops = &ext4_attr_ops,
.release = (void (*)(struct kobject *))kfree,
.release = ext4_feat_release,
};
static struct kobject *ext4_root;
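Background on this fix: kfree() has the prototype void kfree(const void *), so the old cast produced an indirect call through a mismatched function type, which is undefined behavior in C and is rejected at runtime by prototype-checking CFI schemes such as clang's kCFI. A minimal sketch of the pattern (demo name hypothetical):

	/* Broken shape: the call site expects void (*)(struct kobject *), but
	 * the callee is void (*)(const void *); CFI traps on the mismatch.
	 *	.release = (void (*)(struct kobject *))kfree,
	 */

	/* Fixed shape: a trivial wrapper with the exact expected prototype. */
	static void demo_release(struct kobject *kobj)
	{
		kfree(kobj);
	}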

include/linux/dma-mapping.h

@ -618,6 +618,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
/**
* dma_map_sgtable - Map the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
* @dir: DMA direction
* @attrs: Optional DMA attributes for the map operation
*
* Maps a buffer described by a scatterlist stored in the given sg_table
* object for the @dir DMA operation by the @dev device. On success,
* ownership of the buffer is transferred to the DMA domain. One has to
* call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move
* ownership of the buffer back to the CPU domain before the CPU
* touches the buffer.
*
* Returns 0 on success or -EINVAL on error during mapping the buffer.
*/
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
{
int nents;
nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
if (nents <= 0)
return -EINVAL;
sgt->nents = nents;
return 0;
}
/**
* dma_unmap_sgtable - Unmap the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
* @dir: DMA direction
* @attrs: Optional DMA attributes for the unmap operation
*
* Unmaps a buffer described by a scatterlist stored in the given sg_table
* object for the @dir DMA operation by the @dev device. After this function
* the ownership of the buffer is transferred back to the CPU domain.
*/
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
{
dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
/**
* dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
* @dir: DMA direction
*
* Performs the needed cache synchronization and moves the ownership of the
* buffer back to the CPU domain, so it is safe to perform any access to it
* by the CPU. Before doing any further DMA operations, one has to transfer
* the ownership of the buffer back to the DMA domain by calling
* dma_sync_sgtable_for_device().
*/
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir)
{
dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
/**
* dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
* @dir: DMA direction
*
* Performs the needed cache synchronization and moves the ownership of the
* buffer back to the DMA domain, so it is safe to perform the DMA operation.
* Once finished, one has to call dma_sync_sgtable_for_cpu() or
* dma_unmap_sgtable().
*/
static inline void dma_sync_sgtable_for_device(struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir)
{
dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
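Taken together, the intended call pattern for the new helpers looks like the hedged sketch below (device and table names are hypothetical, not from this patch):

	#include <linux/dma-mapping.h>

	static int demo_dma_roundtrip(struct device *dev, struct sg_table *sgt)
	{
		int ret;

		/* Ownership moves to the DMA domain; -EINVAL on failure. */
		ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		if (ret)
			return ret;

		/* ... device performs DMA ... */

		/* Hand the buffer back to the CPU before touching it. */
		dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
		/* ... CPU reads or modifies the buffer ... */
		dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);

		/* ... more DMA, then final teardown ... */
		dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		return 0;
	}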

include/linux/nospec.h

@ -9,6 +9,10 @@
struct task_struct;
#ifndef barrier_nospec
# define barrier_nospec() do { } while (0)
#endif
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index

include/linux/random.h

@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code,
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
}
#else
static inline void add_latent_entropy(void) { }
add_device_randomness(NULL, 0);
#endif
}
void get_random_bytes(void *buf, int len);
size_t __must_check get_random_bytes_arch(void *buf, size_t len);
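Rendered without diff markers, the hunk above is hard to follow; the net result is that add_latent_entropy() keeps a single definition and only its body varies. Per the changelog entry, add_device_randomness() mixes a cycle-counter timestamp in addition to the supplied buffer, so even the plugin-less add_device_randomness(NULL, 0) call contributes entropy. The post-patch function, reassembled from the hunk:

	static inline void add_latent_entropy(void)
	{
	#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
		add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
	#else
		add_device_randomness(NULL, 0);	/* still mixes the cycle counter */
	#endif
	}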

include/linux/scatterlist.h

@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
/*
* Loop over each sg element in the given sg_table object.
*/
#define for_each_sgtable_sg(sgt, sg, i) \
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
/*
* Loop over each sg element in the given *DMA mapped* sg_table object.
* Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses
* of each element.
*/
#define for_each_sgtable_dma_sg(sgt, sg, i) \
for_each_sg(sgt->sgl, sg, sgt->nents, i)
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* @sglist: sglist to iterate over
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
* @pgoffset: starting page offset
* @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_page() to get each page pointer.
* Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
/**
* for_each_sg_dma_page - iterate over the pages of the given sg list
* @sglist: sglist to iterate over
* @dma_iter: page iterator to hold current page
* @dma_iter: DMA page iterator to hold current page
* @dma_nents: maximum number of sg entries to iterate over, this is the value
* returned from dma_map_sg
* @pgoffset: starting page offset
* @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_dma_address() to get each page's DMA address.
* Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
pgoffset); \
__sg_page_iter_dma_next(dma_iter);)
/**
* for_each_sgtable_page - iterate over all pages in the sg_table object
* @sgt: sg_table object to iterate over
* @piter: page iterator to hold current page
* @pgoffset: starting page offset (in pages)
*
* Iterates over all memory pages in the buffer described by
* a scatterlist stored in the given sg_table object.
* See also for_each_sg_page(). Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sgtable_page(sgt, piter, pgoffset) \
for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
/**
* for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
* @sgt: sg_table object to iterate over
* @dma_iter: DMA page iterator to hold current page
* @pgoffset: starting page offset (in pages)
*
* Iterates over all DMA mapped pages in the buffer described by
* a scatterlist stored in the given sg_table object.
* See also for_each_sg_dma_page(). Each iteration operates on a
* PAGE_SIZE unit.
*/
#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
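The practical rule these wrappers encode: CPU-side walks use orig_nents, while DMA-side walks use the nents value written by dma_map_sgtable(). A hedged sketch (hypothetical function, kernel context assumed):

	#include <linux/printk.h>
	#include <linux/scatterlist.h>

	static void demo_walks(struct sg_table *sgt)
	{
		struct scatterlist *sg;
		unsigned int i;

		/* CPU view: every entry the table was built with (orig_nents). */
		for_each_sgtable_sg(sgt, sg, i)
			pr_info("cpu seg %u: len %u\n", i, sg->length);

		/* DMA view: only meaningful after dma_map_sgtable() succeeded. */
		for_each_sgtable_dma_sg(sgt, sg, i) {
			dma_addr_t addr = sg_dma_address(sg);

			pr_info("dma seg %u: %pad len %u\n", i, &addr, sg_dma_len(sg));
		}
	}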
/*
* Mapping sg iterator
*

kernel/bpf/core.c

@ -30,6 +30,7 @@
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/barrier.h>
#include <asm/unaligned.h>
@ -1576,9 +1577,7 @@ out:
* reuse preexisting logic from Spectre v1 mitigation that
* happens to produce the required code on x86 for v4 as well.
*/
#ifdef CONFIG_X86
barrier_nospec();
#endif
CONT;
#define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \

kernel/time/alarmtimer.c

@ -479,11 +479,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(alarm_forward);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)
{
struct alarm_base *base = &alarm_bases[alarm->type];
ktime_t now = base->gettime();
return alarm_forward(alarm, base->gettime(), interval);
if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
/*
* Same issue as with posix_timer_fn(). Timers which are
* periodic but whose signal is ignored can starve the system
* with a very small interval. The real fix which was
* promised in the context of posix_timer_fn() never
* materialized, but someone should really work on it.
*
* To prevent DoS, fake @now to be 1 jiffy out, which keeps
* the overrun accounting correct but creates an
* inconsistency vs. timer_gettime(2).
*/
ktime_t kj = NSEC_PER_SEC / HZ;
if (interval < kj)
now = ktime_add(now, kj);
}
return alarm_forward(alarm, now, interval);
}
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
{
return __alarm_forward_now(alarm, interval, false);
}
EXPORT_SYMBOL_GPL(alarm_forward_now);
@ -557,9 +581,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
/*
* Handle ignored signals and rearm the timer. This will go
* away once we handle ignored signals properly. Ensure that
* away once we handle ignored signals proper. Ensure that
* small intervals cannot starve the system.
*/
ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);
++ptr->it_requeue_pending;
ptr->it_active = 1;
result = ALARMTIMER_RESTART;
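To make the starvation scenario concrete, here is a hedged userspace sketch (a hypothetical demo, not part of the patch): a periodic alarm timer whose signal is ignored is rearmed straight from the expiry path, so a sub-jiffy interval used to refire continuously; with the throttle above, the effective rearm interval is clamped to one jiffy. Note the _ALARM clocks need CAP_WAKE_ALARM, and older glibc needs -lrt for timer_create().

	/* alarm_demo.c: periodic CLOCK_REALTIME_ALARM timer with SIG_IGN. */
	#include <signal.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		struct sigevent sev = {
			.sigev_notify = SIGEV_SIGNAL,
			.sigev_signo  = SIGALRM,
		};
		struct itimerspec its = {
			.it_value    = { .tv_nsec = 1000 },	/* 1 us to first expiry */
			.it_interval = { .tv_nsec = 1000 },	/* 1 us period */
		};
		timer_t t;

		signal(SIGALRM, SIG_IGN);	/* the SIG_IGN from the subject line */
		if (timer_create(CLOCK_REALTIME_ALARM, &sev, &t))
			return 1;
		timer_settime(t, 0, &its, NULL);
		for (;;)
			pause();
	}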

lib/usercopy.c

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/nospec.h>
/* out-of-line parts */
@ -10,6 +11,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
/*
* Ensure that bad access_ok() speculation will not
* lead to nasty side effects *after* the copy is
* finished:
*/
barrier_nospec();
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
}
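Why the barrier sits between the check and the copy: the branch on access_ok() can be mispredicted, so the CPU may speculatively execute the copy with an attacker-chosen address and leave cache side effects even though the architectural result is discarded. barrier_nospec() (an lfence-class barrier on x86, and now a no-op by default elsewhere via the nospec.h hunk above) keeps speculation from crossing the check. A hedged restatement of the hardened shape (names hypothetical):

	/* Sketch of the Spectre-v1 shape being hardened, not kernel source. */
	static long demo_fetch(void *dst, const void __user *src, unsigned long n)
	{
		if (!access_ok(src, n))		/* may be speculated as "ok" */
			return -EFAULT;

		barrier_nospec();		/* no speculative copy past this point */
		return raw_copy_from_user(dst, src, n);
	}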

net/mac80211/ieee80211_i.h

@ -632,6 +632,26 @@ struct mesh_csa_settings {
struct cfg80211_csa_settings settings;
};
/**
* struct mesh_table
*
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
* @walk_head: linked list containing all mesh_path objects
* @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
struct hlist_head walk_head;
spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
struct ieee80211_if_mesh {
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
@ -706,8 +726,8 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */
int meshconf_offset;
struct mesh_table *mesh_paths;
struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
struct mesh_table mesh_paths;
struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
int mpp_paths_generation;
};

net/mac80211/mesh.h

@ -127,26 +127,6 @@ struct mesh_path {
u32 path_change_count;
};
/**
* struct mesh_table
*
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
* @walk_head: linked list containing all mesh_path objects
* @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
struct hlist_head walk_head;
spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
/* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS 256
@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
void mesh_path_timer(struct timer_list *t);

net/mac80211/mesh_pathtbl.c

@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
mesh_path_free_rcu(tbl, mpath);
}
static struct mesh_table *mesh_table_alloc(void)
static void mesh_table_init(struct mesh_table *tbl)
{
struct mesh_table *newtbl;
INIT_HLIST_HEAD(&tbl->known_gates);
INIT_HLIST_HEAD(&tbl->walk_head);
atomic_set(&tbl->entries, 0);
spin_lock_init(&tbl->gates_lock);
spin_lock_init(&tbl->walk_lock);
newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
if (!newtbl)
return NULL;
INIT_HLIST_HEAD(&newtbl->known_gates);
INIT_HLIST_HEAD(&newtbl->walk_head);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
kfree(newtbl);
return NULL;
}
return newtbl;
/* rhashtable_init() may fail only in case of wrong
* mesh_rht_params
*/
WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}
static void mesh_table_free(struct mesh_table *tbl)
{
rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl);
kfree(tbl);
}
/**
@ -240,13 +232,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}
static struct mesh_path *
@ -283,7 +275,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}
/**
@ -298,7 +290,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
/**
@ -311,7 +303,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
int err;
rcu_read_lock();
tbl = mpath->sdata->u.mesh.mesh_paths;
tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) {
@ -420,7 +412,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (!new_mpath)
return ERR_PTR(-ENOMEM);
tbl = sdata->u.mesh.mesh_paths;
tbl = &sdata->u.mesh.mesh_paths;
spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@ -462,7 +454,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = sdata->u.mesh.mpp_paths;
tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@ -491,7 +483,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
void mesh_plink_broken(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
@ -550,7 +542,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@ -565,7 +557,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy)
{
struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@ -599,8 +591,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
table_flush_by_iface(sdata->u.mesh.mesh_paths);
table_flush_by_iface(sdata->u.mesh.mpp_paths);
table_flush_by_iface(&sdata->u.mesh.mesh_paths);
table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}
/**
@ -646,7 +638,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
/* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr);
err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++;
return err;
}
@ -684,7 +676,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct mesh_path *gate;
bool copy = false;
tbl = sdata->u.mesh.mesh_paths;
tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock();
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@ -764,29 +756,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath);
}
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *tbl_path, *tbl_mpp;
int ret;
tbl_path = mesh_table_alloc();
if (!tbl_path)
return -ENOMEM;
tbl_mpp = mesh_table_alloc();
if (!tbl_mpp) {
ret = -ENOMEM;
goto free_path;
}
sdata->u.mesh.mesh_paths = tbl_path;
sdata->u.mesh.mpp_paths = tbl_mpp;
return 0;
free_path:
mesh_table_free(tbl_path);
return ret;
mesh_table_init(&sdata->u.mesh.mesh_paths);
mesh_table_init(&sdata->u.mesh.mpp_paths);
}
static
@ -808,12 +781,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
mesh_table_free(sdata->u.mesh.mesh_paths);
mesh_table_free(sdata->u.mesh.mpp_paths);
mesh_table_free(&sdata->u.mesh.mesh_paths);
mesh_table_free(&sdata->u.mesh.mpp_paths);
}

net/sched/sch_taprio.c

@ -1908,14 +1908,12 @@ start_error:
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
unsigned int ntx = cl - 1;
struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
if (ntx >= dev->num_tx_queues)
if (!dev_queue)
return NULL;
return q->qdiscs[ntx];
return dev_queue->qdisc_sleeping;
}
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)