This is the 5.4.11 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl4bAaMACgkQONu9yGCS
 aT6WThAApG5Lt+rOIIbb0JsTgiqzRs/5VkxQLDsDkn8QXMDDX44eY+cW3XLA+3nv
 UAU7wXraFCq7SznsQADHj4edAQN/urNXcIlLCIfoWCuq4Yk6DIgHNDZcC5a1PPiz
 ri96mrxHq24XDbRcXZFN4usC6Q1Q40W/N2NyZ7gIfCsYOeiaZzNhvw1Sh6Pkajb+
 jKe9Yzjolj2XJxrNgJxfsTJLnCEPwoQ/QoBIp1ffHqhhCjR/7tHm611Pj0q260Fj
 H6OGZaRNMoc4I+2dQXsYUfyPH5aMwx2/Nym4FNHye9LaoQl07m+uR0LqmytPQ0GL
 j6mQuMv+kdeVOOXO+zRJH8A2yq4mwvr80s15myhG9HvAzmcGAvagsCl19yy9/fJx
 6M2Sn8qDwJXRaxTc1e7figXkTZu5+sX7th3sUk0KbCHZ+UkJiCjXpJDgBK/HQkC3
 EsVFZGeIBySbWk2yYKzQkb4ZA32qbzUKW88Rjago3BOV96WHfnAhJDQPssDJqlcs
 cgK+UTQOJb9U1V+Kd4Z8uhlCeboaRj4yOFt2EGxkK2sqJse05eaTN0GPbP/3X6Be
 TyD17Cnv18Ltk2qf2DXanJSlrCUcHEfEDoQQTqJATxV4NLzTcwmVAscsv1aRmcot
 ii1ZTwqi04MLgaNla+6tqqZ/VufUtWVIbN73q2UdU8zZh5PHJ18=
 =cKx6
 -----END PGP SIGNATURE-----

Merge 5.4.11 into android-5.4

Changes in 5.4.11
	USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein
	bpf: Fix passing modified ctx to ld/abs/ind instruction
	ASoC: rt5682: fix i2c arbitration lost issue
	spi: pxa2xx: Add support for Intel Jasper Lake
	regulator: fix use after free issue
	ASoC: max98090: fix possible race conditions
	spi: fsl: Fix GPIO descriptor support
	gpio: Handle counting of Freescale chipselects
	spi: fsl: Handle the single hardwired chipselect case
	locking/spinlock/debug: Fix various data races
	netfilter: ctnetlink: netns exit must wait for callbacks
	x86/intel: Disable HPET on Intel Ice Lake platforms
	netfilter: nf_tables_offload: Check for the NETDEV_UNREGISTER event
	mwifiex: Fix heap overflow in mmwifiex_process_tdls_action_frame()
	libtraceevent: Fix lib installation with O=
	libtraceevent: Copy pkg-config file to output folder when using O=
	regulator: core: fix regulator_register() error paths to properly release rdev
	x86/efi: Update e820 with reserved EFI boot services data to fix kexec breakage
	ASoC: Intel: bytcr_rt5640: Update quirk for Teclast X89
	selftests: netfilter: use randomized netns names
	efi/gop: Return EFI_NOT_FOUND if there are no usable GOPs
	efi/gop: Return EFI_SUCCESS if a usable GOP was found
	efi/gop: Fix memory leak in __gop_query32/64()
	efi/earlycon: Remap entire framebuffer after page initialization
	ARM: dts: imx6ul: imx6ul-14x14-evk.dtsi: Fix SPI NOR probing
	ARM: vexpress: Set-up shared OPP table instead of individual for each CPU
	netfilter: uapi: Avoid undefined left-shift in xt_sctp.h
	netfilter: nft_set_rbtree: bogus lookup/get on consecutive elements in named sets
	netfilter: nf_tables: validate NFT_SET_ELEM_INTERVAL_END
	netfilter: nf_tables: validate NFT_DATA_VALUE after nft_data_init()
	netfilter: nf_tables: skip module reference count bump on object updates
	netfilter: nf_tables_offload: return EOPNOTSUPP if rule specifies no actions
	ARM: dts: BCM5301X: Fix MDIO node address/size cells
	selftests/ftrace: Fix to check the existence of set_ftrace_filter
	selftests/ftrace: Fix ftrace test cases to check unsupported
	selftests/ftrace: Do not to use absolute debugfs path
	selftests/ftrace: Fix multiple kprobe testcase
	selftests: safesetid: Move link library to LDLIBS
	selftests: safesetid: Check the return value of setuid/setgid
	selftests: safesetid: Fix Makefile to set correct test program
	ARM: exynos_defconfig: Restore debugfs support
	ARM: dts: Cygnus: Fix MDIO node address/size cells
	spi: spi-cavium-thunderx: Add missing pci_release_regions()
	reset: Do not register resource data for missing resets
	ASoC: topology: Check return value for snd_soc_add_dai_link()
	ASoC: topology: Check return value for soc_tplg_pcm_create()
	ASoC: SOF: loader: snd_sof_fw_parse_ext_data log warning on unknown header
	ASoC: SOF: Intel: split cht and byt debug window sizes
	ARM: dts: am335x-sancloud-bbe: fix phy mode
	ARM: omap2plus_defconfig: Add back DEBUG_FS
	ARM: dts: bcm283x: Fix critical trip point
	arm64: dts: ls1028a: fix typo in TMU calibration data
	bpf, riscv: Limit to 33 tail calls
	bpf, mips: Limit to 33 tail calls
	bpftool: Don't crash on missing jited insns or ksyms
	perf metricgroup: Fix printing event names of metric group with multiple events
	perf header: Fix false warning when there are no duplicate cache entries
	spi: spi-ti-qspi: Fix a bug when accessing non default CS
	ARM: dts: am437x-gp/epos-evm: fix panel compatible
	kselftest/runner: Print new line in print of timeout log
	kselftest: Support old perl versions
	samples: bpf: Replace symbol compare of trace_event
	samples: bpf: fix syscall_tp due to unused syscall
	arm64: dts: ls1028a: fix reboot node
	ARM: imx_v6_v7_defconfig: Explicitly restore CONFIG_DEBUG_FS
	pinctrl: aspeed-g6: Fix LPC/eSPI mux configuration
	bus: ti-sysc: Fix missing reset delay handling
	clk: walk orphan list on clock provider registration
	mac80211: fix TID field in monitor mode transmit
	cfg80211: fix double-free after changing network namespace
	pinctrl: pinmux: fix a possible null pointer in pinmux_can_be_used_for_gpio
	powerpc: Ensure that swiotlb buffer is allocated from low memory
	btrfs: Fix error messages in qgroup_rescan_init
	Btrfs: fix cloning range with a hole when using the NO_HOLES feature
	powerpc/vcpu: Assume dedicated processors as non-preempt
	powerpc/spinlocks: Include correct header for static key
	btrfs: handle error in btrfs_cache_block_group
	Btrfs: fix hole extent items with a zero size after range cloning
	ocxl: Fix potential memory leak on context creation
	bpf: Clear skb->tstamp in bpf_redirect when necessary
	habanalabs: rate limit error msg on waiting for CS
	habanalabs: remove variable 'val' set but not used
	bnx2x: Do not handle requests from VFs after parity
	bnx2x: Fix logic to get total no. of PFs per engine
	cxgb4: Fix kernel panic while accessing sge_info
	net: usb: lan78xx: Fix error message format specifier
	parisc: fix compilation when KEXEC=n and KEXEC_FILE=y
	parisc: add missing __init annotation
	rfkill: Fix incorrect check to avoid NULL pointer dereference
	ASoC: wm8962: fix lambda value
	regulator: rn5t618: fix module aliases
	spi: nxp-fspi: Ensure width is respected in spi-mem operations
	clk: at91: fix possible deadlock
	staging: axis-fifo: add unspecified HAS_IOMEM dependency
	iommu/iova: Init the struct iova to fix the possible memleak
	kconfig: don't crash on NULL expressions in expr_eq()
	scripts: package: mkdebian: add missing rsync dependency
	perf/x86: Fix potential out-of-bounds access
	perf/x86/intel: Fix PT PMI handling
	sched/psi: Fix sampling error and rare div0 crashes with cgroups and high uptime
	psi: Fix a division error in psi poll()
	usb: typec: fusb302: Fix an undefined reference to 'extcon_get_state'
	block: end bio with BLK_STS_AGAIN in case of non-mq devs and REQ_NOWAIT
	fs: avoid softlockups in s_inodes iterators
	fs: call fsnotify_sb_delete after evict_inodes
	perf/smmuv3: Remove the leftover put_cpu() in error path
	iommu/dma: Relax locking in iommu_dma_prepare_msi()
	io_uring: don't wait when under-submitting
	clk: Move clk_core_reparent_orphans() under CONFIG_OF
	net: stmmac: selftests: Needs to check the number of Multicast regs
	net: stmmac: Determine earlier the size of RX buffer
	net: stmmac: Do not accept invalid MTU values
	net: stmmac: xgmac: Clear previous RX buffer size
	net: stmmac: RX buffer size must be 16 byte aligned
	net: stmmac: Always arm TX Timer at end of transmission start
	s390/purgatory: do not build purgatory with kcov, kasan and friends
	drm/exynos: gsc: add missed component_del
	tpm/tpm_ftpm_tee: add shutdown call back
	xsk: Add rcu_read_lock around the XSK wakeup
	net/mlx5e: Fix concurrency issues between config flow and XSK
	net/i40e: Fix concurrency issues between config flow and XSK
	net/ixgbe: Fix concurrency issues between config flow and XSK
	platform/x86: pcengines-apuv2: fix simswap GPIO assignment
	arm64: cpu_errata: Add Hisilicon TSV110 to spectre-v2 safe list
	block: Fix a lockdep complaint triggered by request queue flushing
	s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly
	s390/dasd: fix memleak in path handling error case
	block: fix memleak when __blk_rq_map_user_iov() is failed
	parisc: Fix compiler warnings in debug_core.c
	sbitmap: only queue kyber's wait callback if not already active
	s390/qeth: handle error due to unsupported transport mode
	s390/qeth: fix promiscuous mode after reset
	s390/qeth: don't return -ENOTSUPP to userspace
	llc2: Fix return statement of llc_stat_ev_rx_null_dsap_xid_c (and _test_c)
	hv_netvsc: Fix unwanted rx_table reset
	selftests: pmtu: fix init mtu value in description
	tracing: Do not create directories if lockdown is in affect
	gtp: fix bad unlock balance in gtp_encap_enable_socket
	macvlan: do not assume mac_header is set in macvlan_broadcast()
	net: dsa: mv88e6xxx: Preserve priority when setting CPU port.
	net: freescale: fec: Fix ethtool -d runtime PM
	net: stmmac: dwmac-sun8i: Allow all RGMII modes
	net: stmmac: dwmac-sunxi: Allow all RGMII modes
	net: stmmac: Fixed link does not need MDIO Bus
	net: usb: lan78xx: fix possible skb leak
	pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM
	sch_cake: avoid possible divide by zero in cake_enqueue()
	sctp: free cmd->obj.chunk for the unprocessed SCTP_CMD_REPLY
	tcp: fix "old stuff" D-SACK causing SACK to be treated as D-SACK
	vxlan: fix tos value before xmit
	mlxsw: spectrum_qdisc: Ignore grafting of invisible FIFO
	net: sch_prio: When ungrafting, replace with FIFO
	vlan: fix memory leak in vlan_dev_set_egress_priority
	vlan: vlan_changelink() should propagate errors
	macb: Don't unregister clks unconditionally
	net/mlx5: Move devlink registration before interfaces load
	net: dsa: mv88e6xxx: force cmode write on 6141/6341
	net/mlx5e: Always print health reporter message to dmesg
	net/mlx5: DR, No need for atomic refcount for internal SW steering resources
	net/mlx5e: Fix hairpin RSS table size
	net/mlx5: DR, Init lists that are used in rule's member
	usb: dwc3: gadget: Fix request complete check
	USB: core: fix check for duplicate endpoints
	USB: serial: option: add Telit ME910G1 0x110a composition
	usb: missing parentheses in USE_NEW_SCHEME
	Linux 5.4.11

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Idb9985bebc97203fa305f881fd98a62ac08e66d9
commit fde6e0c654
Author: Greg Kroah-Hartman
Date:   2020-01-12 15:36:52 +01:00
190 changed files with 1236 additions and 685 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 10
SUBLEVEL = 11
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -108,7 +108,7 @@
&cpsw_emac0 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii-txid";
phy-mode = "rgmii-id";
};
&i2c0 {


@ -86,7 +86,7 @@
};
lcd0: display {
compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
backlight = <&lcd_bl>;


@ -42,7 +42,7 @@
};
lcd0: display {
compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
backlight = <&lcd_bl>;


@ -174,8 +174,8 @@
mdio: mdio@18002000 {
compatible = "brcm,iproc-mdio";
reg = <0x18002000 0x8>;
#size-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
#address-cells = <1>;
status = "disabled";
gphy0: ethernet-phy@0 {


@ -40,7 +40,7 @@
trips {
cpu-crit {
temperature = <80000>;
temperature = <90000>;
hysteresis = <0>;
type = "critical";
};


@ -353,8 +353,8 @@
mdio: mdio@18003000 {
compatible = "brcm,iproc-mdio";
reg = <0x18003000 0x8>;
#size-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
#address-cells = <1>;
};
mdio-bus-mux@18003000 {


@ -215,7 +215,7 @@
flash0: n25q256a@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,n25q256a";
compatible = "micron,n25q256a", "jedec,spi-nor";
spi-max-frequency = <29000000>;
spi-rx-bus-width = <4>;
spi-tx-bus-width = <4>;


@ -348,6 +348,7 @@ CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_SOFTLOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set


@ -460,6 +460,7 @@ CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_PROVE_LOCKING=y
# CONFIG_DEBUG_BUGVERBOSE is not set


@ -552,5 +552,6 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_SPLIT=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_BUGVERBOSE is not set


@ -551,8 +551,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
static int __init ve_spc_clk_init(void)
{
int cpu;
int cpu, cluster;
struct clk *clk;
bool init_opp_table[MAX_CLUSTERS] = { false };
if (!info)
return 0; /* Continue only if SPC is initialised */
@ -578,8 +579,17 @@ static int __init ve_spc_clk_init(void)
continue;
}
cluster = topology_physical_package_id(cpu_dev->id);
if (init_opp_table[cluster])
continue;
if (ve_init_opp_table(cpu_dev))
pr_warn("failed to initialise cpu%d opp table\n", cpu);
else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
topology_core_cpumask(cpu_dev->id)))
pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
else
init_opp_table[cluster] = true;
}
platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);


@ -102,7 +102,7 @@
reboot {
compatible ="syscon-reboot";
regmap = <&dcfg>;
regmap = <&rst>;
offset = <0xb0>;
mask = <0x02>;
};
@ -161,6 +161,12 @@
big-endian;
};
rst: syscon@1e60000 {
compatible = "syscon";
reg = <0x0 0x1e60000 0x0 0x10000>;
little-endian;
};
scfg: syscon@1fc0000 {
compatible = "fsl,ls1028a-scfg", "syscon";
reg = <0x0 0x1fc0000 0x0 0x10000>;
@ -567,7 +573,7 @@
0x00010004 0x0000003d
0x00010005 0x00000045
0x00010006 0x0000004d
0x00010007 0x00000045
0x00010007 0x00000055
0x00010008 0x0000005e
0x00010009 0x00000066
0x0001000a 0x0000006e


@ -575,6 +575,7 @@ static const struct midr_range spectre_v2_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
{ /* sentinel */ }
};


@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
int off, b_off;
int tcc_reg;
ctx->flags |= EBPF_SEEN_TC;
/*
@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
b_off = b_imm(this_idx + 1, ctx);
emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
/*
* if (--TCC < 0)
* if (TCC-- < 0)
* goto out;
*/
/* Delay slot */
emit_instr(ctx, daddiu, MIPS_R_T5,
(ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
b_off = b_imm(this_idx + 1, ctx);
emit_instr(ctx, bltz, MIPS_R_T5, b_off);
emit_instr(ctx, bltz, tcc_reg, b_off);
/*
* prog = array->ptrs[index];
* if (prog == NULL)


@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
__ret; \
})
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);


@ -2,8 +2,6 @@
#ifndef _ASM_PARISC_KEXEC_H
#define _ASM_PARISC_KEXEC_H
#ifdef CONFIG_KEXEC
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
@ -32,6 +30,4 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_KEXEC */
#endif /* _ASM_PARISC_KEXEC_H */


@ -37,5 +37,5 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KEXEC) += kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o


@ -810,7 +810,7 @@ EXPORT_SYMBOL(device_to_hwpath);
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent);
static void walk_lower_bus(struct parisc_device *dev)
static void __init walk_lower_bus(struct parisc_device *dev)
{
unsigned long io_io_low, io_io_high;


@ -15,6 +15,7 @@
*
* (the type definitions are in asm/spinlock_types.h)
*/
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@ -36,10 +37,12 @@
#endif
#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
if (!static_branch_unlikely(&shared_processor))
return false;
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}


@ -282,6 +282,14 @@ void __init mem_init(void)
BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
#ifdef CONFIG_SWIOTLB
/*
* Some platforms (e.g. 85xx) limit DMA-able memory way below
* 4G. We force memblock to bottom-up mode to ensure that the
* memory allocated in swiotlb_init() is DMA-able.
* As it's the last memblock allocation, no need to reset it
* back to to-down.
*/
memblock_set_bottom_up(true);
swiotlb_init(0);
#endif


@ -74,6 +74,9 @@
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
EXPORT_SYMBOL_GPL(shared_processor);
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void)
if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
if (lppaca_shared_proc(get_lppaca()))
static_branch_enable(&shared_processor);
ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV


@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
return -1;
emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);
/* if (--TCC < 0)
/* if (TCC-- < 0)
* goto out;
*/
emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
if (is_13b_check(off, insn))
return -1;
emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);
/* prog = array->ptrs[index];
* if (!prog)


@ -15,8 +15,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare


@ -0,0 +1,3 @@
// SPDX-License-Identifier: GPL-2.0
#define __HAVE_ARCH_MEMCMP /* arch function */
#include "../lib/string.c"


@ -375,7 +375,7 @@ int x86_add_exclusive(unsigned int what)
* LBR and BTS are still mutually exclusive.
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return 0;
goto out;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
mutex_lock(&pmc_reserve_mutex);
@ -387,6 +387,7 @@ int x86_add_exclusive(unsigned int what)
mutex_unlock(&pmc_reserve_mutex);
}
out:
atomic_inc(&active_events);
return 0;
@ -397,11 +398,15 @@ fail_unlock:
void x86_del_exclusive(unsigned int what)
{
atomic_dec(&active_events);
/*
* See the comment in x86_add_exclusive().
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return;
atomic_dec(&x86_pmu.lbr_exclusive[what]);
atomic_dec(&active_events);
}
int x86_setup_perfctr(struct perf_event *event)
@ -1641,9 +1646,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr = \
struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr);
u64 config = x86_pmu.event_map(pmu_attr->id);
u64 config = 0;
if (pmu_attr->id < x86_pmu.max_events)
config = x86_pmu.event_map(pmu_attr->id);
/* string trumps id */
if (pmu_attr->event_str)
@ -1712,6 +1720,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct perf_pmu_events_attr *pmu_attr;
if (idx >= x86_pmu.max_events)
return 0;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
/* str trumps id */
return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;


@ -714,6 +714,8 @@ static struct chipset early_qrk[] __initdata = {
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_INTEL, 0x3ec4,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_INTEL, 0x8a12,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_BROADCOM, 0x4331,
PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}


@ -260,10 +260,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
/* No need to reserve regions that will never be freed. */
if (md.attribute & EFI_MEMORY_RUNTIME)
return;
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);
@ -293,6 +289,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
early_memunmap(new, new_size);
efi_memmap_install(new_phys, num_entries);
e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
e820__update_table(e820_table);
}
/*


@ -887,11 +887,14 @@ generic_make_request_checks(struct bio *bio)
}
/*
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
* Non-mq queues do not honor REQ_NOWAIT, so complete a bio
* with BLK_STS_AGAIN status in order to catch -EAGAIN and
* to give a chance to the caller to repeat request gracefully.
*/
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
goto not_supported;
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
status = BLK_STS_AGAIN;
goto end_io;
}
if (should_fail_bio(bio))
goto end_io;


@ -69,6 +69,7 @@
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
#include <linux/lockdep.h>
#include "blk.h"
#include "blk-mq.h"
@ -492,6 +493,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
INIT_LIST_HEAD(&fq->flush_queue[1]);
INIT_LIST_HEAD(&fq->flush_data_in_flight);
lockdep_register_key(&fq->key);
lockdep_set_class(&fq->mq_flush_lock, &fq->key);
return fq;
fail_rq:
@ -506,6 +510,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
if (!fq)
return;
lockdep_unregister_key(&fq->key);
kfree(fq->flush_rq);
kfree(fq);
}


@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return 0;
unmap_rq:
__blk_rq_unmap_user(bio);
blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
return ret;


@ -30,6 +30,7 @@ struct blk_flush_queue {
* at the same time
*/
struct request *orig_rq;
struct lock_class_key key;
spinlock_t mq_flush_lock;
};


@ -1594,6 +1594,10 @@ static int sysc_reset(struct sysc *ddata)
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,
ddata->cfg.srst_udelay * 2);
if (ddata->clk_enable_quirk)
ddata->clk_enable_quirk(ddata);


@ -32,7 +32,7 @@ static const uuid_t ftpm_ta_uuid =
0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
/**
* ftpm_tee_tpm_op_recv - retrieve fTPM response.
* ftpm_tee_tpm_op_recv() - retrieve fTPM response.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
* @buf: the buffer to store data.
* @count: the number of bytes to read.
@ -61,7 +61,7 @@ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
/**
* ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory.
* ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
* @buf: the buffer to send.
* @len: the number of bytes to send.
@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
}
/**
* ftpm_tee_probe - initialize the fTPM
* ftpm_tee_probe() - initialize the fTPM
* @pdev: the platform_device description.
*
* Return:
@ -298,7 +298,7 @@ out_tee_session:
}
/**
* ftpm_tee_remove - remove the TPM device
* ftpm_tee_remove() - remove the TPM device
* @pdev: the platform_device description.
*
* Return:
@ -328,6 +328,19 @@ static int ftpm_tee_remove(struct platform_device *pdev)
return 0;
}
/**
* ftpm_tee_shutdown() - shutdown the TPM device
* @pdev: the platform_device description.
*/
static void ftpm_tee_shutdown(struct platform_device *pdev)
{
struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
tee_shm_free(pvt_data->shm);
tee_client_close_session(pvt_data->ctx, pvt_data->session);
tee_client_close_context(pvt_data->ctx);
}
static const struct of_device_id of_ftpm_tee_ids[] = {
{ .compatible = "microsoft,ftpm" },
{ }
@ -341,6 +354,7 @@ static struct platform_driver ftpm_tee_driver = {
},
.probe = ftpm_tee_probe,
.remove = ftpm_tee_remove,
.shutdown = ftpm_tee_shutdown,
};
module_platform_driver(ftpm_tee_driver);


@ -348,7 +348,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
return;
mainxtal_name = of_clk_get_parent_name(np, i);
regmap = syscon_node_to_regmap(np);
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;


@ -83,7 +83,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
regmap = syscon_node_to_regmap(np);
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;


@ -146,7 +146,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
mainxtal_name = of_clk_get_parent_name(np, i);
regmap = syscon_node_to_regmap(np);
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;


@ -275,7 +275,7 @@ static int __init pmc_register_ops(void)
np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
pmcreg = syscon_node_to_regmap(np);
pmcreg = device_node_to_regmap(np);
if (IS_ERR(pmcreg))
return PTR_ERR(pmcreg);


@ -162,7 +162,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
regmap = syscon_node_to_regmap(np);
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;


@ -136,7 +136,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
regmap = syscon_node_to_regmap(np);
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;


@ -3310,6 +3310,35 @@ static inline void clk_debug_unregister(struct clk_core *core)
}
#endif
static void clk_core_reparent_orphans_nolock(void)
{
struct clk_core *orphan;
struct hlist_node *tmp2;
/*
* walk the list of orphan clocks and reparent any that newly finds a
* parent.
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
struct clk_core *parent = __clk_init_parent(orphan);
/*
* We need to use __clk_set_parent_before() and _after() to
* to properly migrate any prepare/enable count of the orphan
* clock. This is important for CLK_IS_CRITICAL clocks, which
* are enabled during init but might not have a parent yet.
*/
if (parent) {
/* update the clk tree topology */
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
__clk_recalc_rates(orphan, 0);
__clk_core_update_orphan_hold_state(orphan);
}
}
}
/**
* __clk_core_init - initialize the data structures in a struct clk_core
* @core: clk_core being initialized
@ -3320,8 +3349,6 @@ static inline void clk_debug_unregister(struct clk_core *core)
static int __clk_core_init(struct clk_core *core)
{
int ret;
struct clk_core *orphan;
struct hlist_node *tmp2;
unsigned long rate;
if (!core)
@ -3471,29 +3498,7 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_hold_state(core);
/*
* walk the list of orphan clocks and reparent any that newly finds a
* parent.
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
struct clk_core *parent = __clk_init_parent(orphan);
/*
* We need to use __clk_set_parent_before() and _after() to
* to properly migrate any prepare/enable count of the orphan
* clock. This is important for CLK_IS_CRITICAL clocks, which
* are enabled during init but might not have a parent yet.
*/
if (parent) {
/* update the clk tree topology */
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
__clk_recalc_rates(orphan, 0);
__clk_core_update_orphan_hold_state(orphan);
}
}
clk_core_reparent_orphans_nolock();
kref_init(&core->ref);
out:
@ -4244,6 +4249,13 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
#ifdef CONFIG_OF
static void clk_core_reparent_orphans(void)
{
clk_prepare_lock();
clk_core_reparent_orphans_nolock();
clk_prepare_unlock();
}
/**
* struct of_clk_provider - Clock provider registration structure
* @link: Entry in global list of clock providers
@ -4339,6 +4351,8 @@ int of_clk_add_provider(struct device_node *np,
mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %pOF\n", np);
clk_core_reparent_orphans();
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
@ -4374,6 +4388,8 @@ int of_clk_add_hw_provider(struct device_node *np,
mutex_unlock(&of_clk_mutex);
pr_debug("Added clk_hw provider from %pOF\n", np);
clk_core_reparent_orphans();
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);


@ -13,18 +13,57 @@
#include <asm/early_ioremap.h>
static const struct console *earlycon_console __initdata;
static const struct font_desc *font;
static u32 efi_x, efi_y;
static u64 fb_base;
static pgprot_t fb_prot;
static void *efi_fb;
/*
* EFI earlycon needs to use early_memremap() to map the framebuffer.
* But early_memremap() is not usable for 'earlycon=efifb keep_bootcon',
* memremap() should be used instead. memremap() will be available after
* paging_init() which is earlier than initcall callbacks. Thus adding this
* early initcall function early_efi_map_fb() to map the whole EFI framebuffer.
*/
static int __init efi_earlycon_remap_fb(void)
{
/* bail if there is no bootconsole or it has been disabled already */
if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
return 0;
if (pgprot_val(fb_prot) == pgprot_val(PAGE_KERNEL))
efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WB);
else
efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WC);
return efi_fb ? 0 : -ENOMEM;
}
early_initcall(efi_earlycon_remap_fb);
static int __init efi_earlycon_unmap_fb(void)
{
/* unmap the bootconsole fb unless keep_bootcon has left it enabled */
if (efi_fb && !(earlycon_console->flags & CON_ENABLED))
memunmap(efi_fb);
return 0;
}
late_initcall(efi_earlycon_unmap_fb);
static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
{
if (efi_fb)
return efi_fb + start;
return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
}
static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
{
if (efi_fb)
return;
early_memunmap(addr, len);
}
@ -201,6 +240,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
efi_earlycon_scroll_up();
device->con->write = efi_earlycon_write;
earlycon_console = device->con;
return 0;
}
EARLYCON_DECLARE(efifb, efi_earlycon_setup);


@ -83,30 +83,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
static efi_status_t
__gop_query32(efi_system_table_t *sys_table_arg,
struct efi_graphics_output_protocol_32 *gop32,
struct efi_graphics_output_mode_info **info,
unsigned long *size, u64 *fb_base)
{
struct efi_graphics_output_protocol_mode_32 *mode;
efi_graphics_output_protocol_query_mode query_mode;
efi_status_t status;
unsigned long m;
m = gop32->mode;
mode = (struct efi_graphics_output_protocol_mode_32 *)m;
query_mode = (void *)(unsigned long)gop32->query_mode;
status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
info);
if (status != EFI_SUCCESS)
return status;
*fb_base = mode->frame_buffer_base;
return status;
}
static efi_status_t
setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
efi_guid_t *proto, unsigned long size, void **gop_handle)
@ -119,7 +95,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
efi_status_t status = EFI_NOT_FOUND;
efi_status_t status;
u32 *handles = (u32 *)(unsigned long)gop_handle;
int i;
@ -128,6 +104,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u32);
for (i = 0; i < nr_gops; i++) {
struct efi_graphics_output_protocol_mode_32 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@ -145,9 +122,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
status = __gop_query32(sys_table_arg, gop32, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
mode = (void *)(unsigned long)gop32->mode;
info = (void *)(unsigned long)mode->info;
current_fb_base = mode->frame_buffer_base;
if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@ -175,7 +154,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
goto out;
return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@ -197,32 +176,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
out:
return status;
}
static efi_status_t
__gop_query64(efi_system_table_t *sys_table_arg,
struct efi_graphics_output_protocol_64 *gop64,
struct efi_graphics_output_mode_info **info,
unsigned long *size, u64 *fb_base)
{
struct efi_graphics_output_protocol_mode_64 *mode;
efi_graphics_output_protocol_query_mode query_mode;
efi_status_t status;
unsigned long m;
m = gop64->mode;
mode = (struct efi_graphics_output_protocol_mode_64 *)m;
query_mode = (void *)(unsigned long)gop64->query_mode;
status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
info);
if (status != EFI_SUCCESS)
return status;
*fb_base = mode->frame_buffer_base;
return status;
return EFI_SUCCESS;
}
static efi_status_t
@ -237,7 +192,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
efi_status_t status = EFI_NOT_FOUND;
efi_status_t status;
u64 *handles = (u64 *)(unsigned long)gop_handle;
int i;
@ -246,6 +201,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u64);
for (i = 0; i < nr_gops; i++) {
struct efi_graphics_output_protocol_mode_64 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@ -263,9 +219,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
status = __gop_query64(sys_table_arg, gop64, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
mode = (void *)(unsigned long)gop64->mode;
info = (void *)(unsigned long)mode->info;
current_fb_base = mode->frame_buffer_base;
if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@ -293,7 +251,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
goto out;
return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@ -315,8 +273,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
out:
return status;
return EFI_SUCCESS;
}
/*


@ -23,6 +23,29 @@
#include "gpiolib.h"
#include "gpiolib-of.h"
/**
* of_gpio_spi_cs_get_count() - special GPIO counting for SPI
* Some elder GPIO controllers need special quirks. Currently we handle
* the Freescale GPIO controller with bindings that doesn't use the
* established "cs-gpios" for chip selects but instead rely on
* "gpios" for the chip select lines. If we detect this, we redirect
* the counting of "cs-gpios" to count "gpios" transparent to the
* driver.
*/
int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
{
struct device_node *np = dev->of_node;
if (!IS_ENABLED(CONFIG_SPI_MASTER))
return 0;
if (!con_id || strcmp(con_id, "cs"))
return 0;
if (!of_device_is_compatible(np, "fsl,spi") &&
!of_device_is_compatible(np, "aeroflexgaisler,spictrl"))
return 0;
return of_gpio_named_count(np, "gpios");
}
/*
* This is used by external users of of_gpio_count() from <linux/of_gpio.h>
*
@ -35,6 +58,10 @@ int of_gpio_get_count(struct device *dev, const char *con_id)
char propname[32];
unsigned int i;
ret = of_gpio_spi_cs_get_count(dev, con_id);
if (ret > 0)
return ret;
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id)
snprintf(propname, sizeof(propname), "%s-%s",


@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);


@ -19,6 +19,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
@ -43,7 +44,6 @@ struct iommu_dma_cookie {
dma_addr_t msi_iova;
};
struct list_head msi_page_list;
spinlock_t msi_lock;
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
@ -62,7 +62,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (cookie) {
spin_lock_init(&cookie->msi_lock);
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
}
@ -1150,7 +1149,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (msi_page->phys == msi_addr)
return msi_page;
msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return NULL;
@ -1180,7 +1179,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie;
struct iommu_dma_msi_page *msi_page;
unsigned long flags;
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
if (!domain || !domain->iova_cookie) {
desc->iommu_cookie = NULL;
@ -1190,13 +1189,13 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
cookie = domain->iova_cookie;
/*
* We disable IRQs to rule out a possible inversion against
* irq_desc_lock if, say, someone tries to retarget the affinity
* of an MSI from within an IPI handler.
* In fact the whole prepare operation should already be serialised by
* irq_domain_mutex further up the callchain, but that's pretty subtle
* on its own, so consider this locking as failsafe documentation...
*/
spin_lock_irqsave(&cookie->msi_lock, flags);
mutex_lock(&msi_prepare_lock);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
spin_unlock_irqrestore(&cookie->msi_lock, flags);
mutex_unlock(&msi_prepare_lock);
msi_desc_set_iommu_cookie(desc, msi_page);


@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
struct iova *alloc_iova_mem(void)
{
return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);


@ -777,8 +777,9 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc < 0) {
dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
rc, seq);
dev_err_ratelimited(hdev->dev,
"Error %ld on waiting for CS handle %llu\n",
rc, seq);
if (rc == -ERESTARTSYS) {
args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR;


@ -176,7 +176,7 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
spin_lock(&ctx->cs_lock);
if (seq >= ctx->cs_sequence) {
dev_notice(hdev->dev,
dev_notice_ratelimited(hdev->dev,
"Can't wait on seq %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);


@ -2171,7 +2171,7 @@ static int goya_push_linux_to_device(struct hl_device *hdev)
static int goya_pldm_init_cpu(struct hl_device *hdev)
{
u32 val, unit_rst_val;
u32 unit_rst_val;
int rc;
/* Must initialize SRAM scrambler before pushing u-boot to SRAM */
@ -2179,14 +2179,14 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
/* Put ARM cores into reset */
WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
/* Reset the CA53 MACRO */
unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
rc = goya_push_uboot_to_device(hdev);
if (rc)
@ -2207,7 +2207,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
/* Release ARM core 0 from reset */
WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
CPU_RESET_CORE0_DEASSERT);
val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
return 0;
}
@ -2475,13 +2475,12 @@ err:
static int goya_hw_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 val;
int rc;
dev_info(hdev->dev, "Starting initialization of H/W\n");
/* Perform read from the device to make sure device is up */
val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
/*
* Let's mark in the H/W that we have reached this point. We check
@ -2533,7 +2532,7 @@ static int goya_hw_init(struct hl_device *hdev)
goto disable_queues;
/* Perform read from the device to flush all MSI-X configuration */
val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return 0;


@ -10,18 +10,17 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
int pasid;
struct ocxl_context *ctx;
*context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
if (!*context)
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx = *context;
ctx->afu = afu;
mutex_lock(&afu->contexts_lock);
pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
afu->pasid_base + afu->pasid_max, GFP_KERNEL);
if (pasid < 0) {
mutex_unlock(&afu->contexts_lock);
kfree(ctx);
return pasid;
}
afu->pasid_count++;
@ -43,6 +42,7 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
* duration of the life of the context
*/
ocxl_afu_get(afu);
*context = ctx;
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);


@ -332,6 +332,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
/* Use the default high priority for management frames sent to
* the CPU.
*/
port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
return mv88e6390_g1_monitor_write(chip, ptr, port);
}


@ -210,6 +210,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
/* Offset 0x1C: Global Control 2 */


@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port)
}
static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
phy_interface_t mode, bool force)
{
u8 lane;
u16 cmode;
@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
/* cmode doesn't change, nothing to do for us */
if (cmode == chip->ports[port].cmode)
/* cmode doesn't change, nothing to do for us unless forced */
if (cmode == chip->ports[port].cmode && !force)
return 0;
lane = mv88e6xxx_serdes_get_lane(chip, port);
@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 9 && port != 10)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_cmode(chip, port, mode);
return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
break;
}
return mv88e6xxx_port_set_cmode(chip, port, mode);
return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
return mv88e6xxx_port_set_cmode(chip, port, mode);
return mv88e6xxx_port_set_cmode(chip, port, mode, true);
}
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)


@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
u32 func_config =
MF_CFG_RD(bp,
func_mf_config[BP_PORT(bp) + 2 * i].
func_mf_config[BP_PATH(bp) + 2 * i].
config);
func_num +=
((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);


@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
*/
static void bnx2x_parity_recover(struct bnx2x *bp)
{
bool global = false;
u32 error_recovered, error_unrecovered;
bool is_parity;
bool is_parity, global = false;
#ifdef CONFIG_BNX2X_SRIOV
int vf_idx;
for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
if (vf)
vf->state = VF_LOST;
}
#endif
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {


@ -139,6 +139,7 @@ struct bnx2x_virtf {
#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
#define VF_LOST 4 /* Recovery while VFs are loaded */
bool flr_clnup_stage; /* true during flr cleanup */
bool malicious; /* true if FW indicated so, until FLR */


@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
{
int i;
if (vf->state == VF_LOST) {
/* Just ack the FW and return if VFs are lost
* in case of parity error. VFs are supposed to be timedout
* on waiting for PF response.
*/
DP(BNX2X_MSG_IOV,
"VF 0x%x lost, not handling the request\n", vf->abs_vfid);
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
return;
}
/* check if tlv type is known */
if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
/* Lock the per vf op mutex and note the locker's identity.


@ -4027,7 +4027,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
mgmt->rate = 0;
mgmt->hw.init = &init;
*tx_clk = clk_register(NULL, &mgmt->hw);
*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
if (IS_ERR(*tx_clk))
return PTR_ERR(*tx_clk);
@ -4361,7 +4361,6 @@ err_out_free_netdev:
err_disable_clocks:
clk_disable_unprepare(tx_clk);
clk_unregister(tx_clk);
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
clk_disable_unprepare(rx_clk);
@ -4397,7 +4396,6 @@ static int macb_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
clk_disable_unprepare(bp->tx_clk);
clk_unregister(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);


@ -2996,6 +2996,9 @@ static int sge_queue_entries(const struct adapter *adap)
int tot_uld_entries = 0;
int i;
if (!is_uld(adap))
goto lld_only;
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_TX_MAX; i++)
tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
@ -3006,6 +3009,7 @@ static int sge_queue_entries(const struct adapter *adap)
}
mutex_unlock(&uld_mutex);
lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;


@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
return;
regs->version = fec_enet_register_version;
@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev,
off >>= 2;
buf[off] = readl(&theregs[off]);
}
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
static int fec_enet_get_ts_info(struct net_device *ndev,


@ -1151,7 +1151,7 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
return !!vsi->xdp_prog;
return !!READ_ONCE(vsi->xdp_prog);
}
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);


@ -6804,8 +6804,8 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
if (i40e_enabled_xdp_vsi(vsi)) {
/* Make sure that in-progress ndo_xdp_xmit
* calls are completed.
/* Make sure that in-progress ndo_xdp_xmit and
* ndo_xsk_wakeup calls are completed.
*/
synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[i]);
@ -12526,8 +12526,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset)
if (need_reset) {
if (!prog)
/* Wait until ndo_xsk_wakeup completes. */
synchronize_rcu();
i40e_reset_and_rebuild(pf, true, true);
}
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);


@ -787,8 +787,12 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ring *ring;
if (test_bit(__I40E_CONFIG_BUSY, pf->state))
return -ENETDOWN;
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;


@ -10248,7 +10248,12 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* If transitioning XDP modes reconfigure rings */
if (need_reset) {
int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
int err;
if (!prog)
/* Wait until ndo_xsk_wakeup completes. */
synchronize_rcu();
err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
rcu_assign_pointer(adapter->xdp_prog, old_prog);


@ -709,10 +709,14 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (qid >= adapter->num_xdp_queues)
return -ENXIO;
if (!adapter->xdp_ring[qid]->xsk_umem)
ring = adapter->xdp_ring[qid];
if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
return -ENETDOWN;
if (!ring->xsk_umem)
return -ENXIO;
ring = adapter->xdp_ring[qid];
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
u64 eics = BIT_ULL(ring->q_vector->v_idx);


@ -760,7 +760,7 @@ enum {
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
MLX5E_STATE_XDP_OPEN,
MLX5E_STATE_XDP_ACTIVE,
};
struct mlx5e_rqt {


@ -122,6 +122,22 @@ enum {
#endif
};
#define MLX5E_TTC_NUM_GROUPS 3
#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
MLX5E_TTC_GROUP2_SIZE +\
MLX5E_TTC_GROUP3_SIZE)
#define MLX5E_INNER_TTC_NUM_GROUPS 3
#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
MLX5E_INNER_TTC_GROUP2_SIZE +\
MLX5E_INNER_TTC_GROUP3_SIZE)
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_table {


@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str,
struct mlx5e_err_ctx *err_ctx)
{
if (!reporter) {
netdev_err(priv->netdev, err_str);
netdev_err(priv->netdev, err_str);
if (!reporter)
return err_ctx->recover(&err_ctx->ctx);
}
return devlink_health_report(reporter, err_str, err_ctx);
}


@ -75,12 +75,18 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
if (priv->channels.params.xdp_prog)
set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}
static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
if (priv->channels.params.xdp_prog)
clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
/* let other device's napi(s) see our new state */
/* Let other device's napi(s) and XSK wakeups see our new state. */
synchronize_rcu();
}
@ -89,19 +95,9 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}
static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv)
static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
}
static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv)
{
clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
}
static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv)
{
return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)


@ -144,6 +144,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
napi_synchronize(&c->napi);
synchronize_rcu(); /* Sync with the XSK wakeup. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);


@ -14,7 +14,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
struct mlx5e_channel *c;
u16 ix;
if (unlikely(!mlx5e_xdp_is_open(priv)))
if (unlikely(!mlx5e_xdp_is_active(priv)))
return -ENETDOWN;
if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))


@ -904,22 +904,6 @@ del_rules:
return err;
}
#define MLX5E_TTC_NUM_GROUPS 3
#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
MLX5E_TTC_GROUP2_SIZE +\
MLX5E_TTC_GROUP3_SIZE)
#define MLX5E_INNER_TTC_NUM_GROUPS 3
#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
MLX5E_INNER_TTC_GROUP2_SIZE +\
MLX5E_INNER_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
bool use_ipv)
{


@ -3002,12 +3002,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
bool is_xdp = priv->channels.params.xdp_prog;
int err;
set_bit(MLX5E_STATE_OPENED, &priv->state);
if (is_xdp)
mlx5e_xdp_set_open(priv);
err = mlx5e_open_channels(priv, &priv->channels);
if (err)
@ -3022,8 +3019,6 @@ int mlx5e_open_locked(struct net_device *netdev)
return 0;
err_clear_state_opened_flag:
if (is_xdp)
mlx5e_xdp_set_closed(priv);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
}
@ -3055,8 +3050,6 @@ int mlx5e_close_locked(struct net_device *netdev)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
if (priv->channels.params.xdp_prog)
mlx5e_xdp_set_closed(priv);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
@ -4373,16 +4366,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return 0;
}
static int mlx5e_xdp_update_state(struct mlx5e_priv *priv)
{
if (priv->channels.params.xdp_prog)
mlx5e_xdp_set_open(priv);
else
mlx5e_xdp_set_closed(priv);
return 0;
}
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4422,7 +4405,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
mlx5e_set_rq_type(priv->mdev, &new_channels.params);
old_prog = priv->channels.params.xdp_prog;
err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state);
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
if (err)
goto unlock;
} else {

@@ -586,7 +586,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
ft_attr->max_fte = MLX5E_NUM_TT;
ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_TC_PRIO;
}

@@ -1197,6 +1197,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
if (err)
goto err_load;
if (boot) {
err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
if (err)
goto err_devlink_reg;
}
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1214,6 +1220,9 @@ out:
return err;
err_reg_dev:
if (boot)
mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
mlx5_unload(dev);
err_load:
if (boot)
@@ -1353,10 +1362,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
request_module_nowait(MLX5_IB_MOD);
err = mlx5_devlink_register(devlink, &pdev->dev);
if (err)
goto clean_load;
err = mlx5_crdump_enable(dev);
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
@@ -1364,9 +1369,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
return 0;
clean_load:
mlx5_unload_one(dev, true);
err_load_one:
mlx5_pci_close(dev);
pci_init_err:

@@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
/* We need to copy the refcount since this ste
* may have been traversed several times
*/
refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
new_ste->refcount = cur_ste->refcount;
/* Link old STEs rule_mem list to the new ste */
mlx5dr_rule_update_rule_member(cur_ste, new_ste);
@@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
if (!rule_mem)
return -ENOMEM;
INIT_LIST_HEAD(&rule_mem->list);
INIT_LIST_HEAD(&rule_mem->use_ste_list);
rule_mem->ste = ste;
list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);

@@ -340,7 +340,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
if (dst->next_htbl)
dst->next_htbl->pointing_ste = dst;
refcount_set(&dst->refcount, refcount_read(&src->refcount));
dst->refcount = src->refcount;
INIT_LIST_HEAD(&dst->rule_list);
list_splice_tail_init(&src->rule_list, &dst->rule_list);
@@ -557,7 +557,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
{
return !refcount_read(&ste->refcount);
return !ste->refcount;
}
/* Init one ste as a pattern for ste data array */
@@ -681,14 +681,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->ste_arr = chunk->ste_arr;
htbl->hw_ste_arr = chunk->hw_ste_arr;
htbl->miss_list = chunk->miss_list;
refcount_set(&htbl->refcount, 0);
htbl->refcount = 0;
for (i = 0; i < chunk->num_of_entries; i++) {
struct mlx5dr_ste *ste = &htbl->ste_arr[i];
ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
ste->htbl = htbl;
refcount_set(&ste->refcount, 0);
ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
INIT_LIST_HEAD(&htbl->miss_list[i]);
INIT_LIST_HEAD(&ste->rule_list);
@@ -705,7 +705,7 @@ out_free_htbl:
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
if (refcount_read(&htbl->refcount))
if (htbl->refcount)
return -EBUSY;
mlx5dr_icm_free_chunk(htbl->chunk);

@@ -117,7 +117,7 @@ struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste {
u8 *hw_ste;
/* refcount: indicates the num of rules that using this ste */
refcount_t refcount;
u32 refcount;
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
@@ -149,7 +149,7 @@ struct mlx5dr_ste_htbl_ctrl {
struct mlx5dr_ste_htbl {
u8 lu_type;
u16 byte_mask;
refcount_t refcount;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste *ste_arr;
u8 *hw_ste_arr;
@@ -200,13 +200,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
{
if (refcount_dec_and_test(&htbl->refcount))
htbl->refcount--;
if (!htbl->refcount)
mlx5dr_ste_htbl_free(htbl);
}
static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
{
refcount_inc(&htbl->refcount);
htbl->refcount++;
}
/* STE utils */
@@ -248,14 +249,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher)
{
if (refcount_dec_and_test(&ste->refcount))
ste->refcount--;
if (!ste->refcount)
mlx5dr_ste_free(ste, matcher, nic_matcher);
}
/* initial as 0, increased only when ste appears in a new rule */
static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
refcount_inc(&ste->refcount);
ste->refcount++;
}
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
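The mlx5dr hunks above convert these refcounts from refcount_t to plain u32 with ordinary ++/--, which is only safe when every get/put happens under a single external lock rather than concurrently. A minimal sketch of that pattern (invented names, not the mlx5 code):

    #include <stdlib.h>

    struct ste_like {
        unsigned int refcount;      /* only ever touched with the owner's lock held */
    };

    static void ste_get(struct ste_like *ste)
    {
        ste->refcount++;
    }

    static void ste_put(struct ste_like *ste)
    {
        ste->refcount--;
        if (!ste->refcount)
            free(ste);              /* stands in for the driver's free routine */
    }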

@@ -650,6 +650,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
return 0;
if (!p->child_handle) {
/* This is an invisible FIFO replacing the original Qdisc.
* Ignore it--the original Qdisc's destroy will follow.
*/
return 0;
}
/* See if the grafted qdisc is already offloaded on any tclass. If so,
* unoffload it.
*/

@@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* default */
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
break;
case PHY_INTERFACE_MODE_RMII:

@@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
* rate, which then uses the auto-reparenting feature of the
* clock driver, and enabling/disabling the clock.
*/
if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
if (phy_interface_mode_is_rgmii(gmac->interface)) {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
clk_prepare_enable(gmac->tx_clk);
gmac->clk_enabled = 1;

@@ -343,6 +343,8 @@
#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
#define XGMAC_RxPBL GENMASK(21, 16)
#define XGMAC_RxPBL_SHIFT 16
#define XGMAC_RBSZ GENMASK(14, 1)
#define XGMAC_RBSZ_SHIFT 1
#define XGMAC_RXST BIT(0)
#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x)))
#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))

@@ -489,7 +489,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
value |= bfsize << 1;
value &= ~XGMAC_RBSZ;
value |= bfsize << XGMAC_RBSZ_SHIFT;
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
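The dwxgmac2_set_bfsize() fix above masks out the old RBSZ field before OR-ing in the new buffer size; OR alone can leave stale bits behind when a smaller value replaces a larger one. A generic read-modify-write sketch of the same idea, mirroring the XGMAC_RBSZ definition added above (function name invented):

    #include <stdint.h>

    #define RBSZ_MASK   (0x3FFFu << 1)      /* bits 14:1, like GENMASK(14, 1) */
    #define RBSZ_SHIFT  1

    static uint32_t set_rx_buf_size(uint32_t reg, uint32_t bfsize)
    {
        reg &= ~RBSZ_MASK;                          /* drop the old field value */
        reg |= (bfsize << RBSZ_SHIFT) & RBSZ_MASK;  /* install the new one */
        return reg;
    }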

@@ -45,7 +45,7 @@
#include "dwxgmac2.h"
#include "hwif.h"
#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
@@ -1292,19 +1292,9 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
int ret = -ENOMEM;
int bfsize = 0;
int queue;
int i;
bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
priv->dma_buf_sz = bfsize;
/* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev,
"SKB addresses:\nskb\t\tskb data\tdma data\n");
@@ -1346,8 +1336,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
}
}
buf_sz = bfsize;
return 0;
err_init_rx_buffers:
@@ -2654,6 +2642,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
int bfsize = 0;
u32 chan;
int ret;
@@ -2673,7 +2662,16 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
priv->dma_buf_sz = bfsize;
buf_sz = bfsize;
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
ret = alloc_dma_desc_resources(priv);
@@ -3103,6 +3101,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
@@ -3330,6 +3329,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
@@ -3747,12 +3747,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
txfifosz /= priv->plat->tx_queues_to_use;
if (netif_running(dev)) {
netdev_err(priv->dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
new_mtu = STMMAC_ALIGN(new_mtu);
/* If condition true, FIFO is too small or MTU too large */
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
dev->mtu = new_mtu;
netdev_update_features(dev);
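The stmmac_change_mtu() hunk above rejects an MTU that does not fit in one TX queue's share of the FIFO or exceeds the 16 KiB buffer limit. A tiny illustration of that check with example numbers (names, alignment and values are placeholders, not hardware facts):

    #include <stdio.h>

    #define BUF_SIZE_16KIB  16384
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    static int mtu_fits(int new_mtu, int tx_fifo_bytes, int tx_queues)
    {
        int per_queue = tx_fifo_bytes / tx_queues;
        int aligned = ALIGN_UP(new_mtu, 64);    /* stand-in for STMMAC_ALIGN() */

        return aligned <= per_queue && aligned <= BUF_SIZE_16KIB;
    }

    int main(void)
    {
        /* e.g. a 16 KiB FIFO shared by 4 queues cannot take a 9000-byte MTU */
        printf("%d\n", mtu_fits(9000, 16384, 4));   /* prints 0 */
        printf("%d\n", mtu_fits(1500, 16384, 4));   /* prints 1 */
        return 0;
    }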

@@ -320,7 +320,7 @@ out:
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
struct device_node *np, struct device *dev)
{
bool mdio = false;
bool mdio = !of_phy_is_fixed_link(np);
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{},

@@ -624,6 +624,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
return -EOPNOTSUPP;
if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
return -EOPNOTSUPP;
if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
return -EOPNOTSUPP;
while (--tries) {
/* We only need to check the mc_addr for collisions */
@@ -666,6 +668,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
return -EOPNOTSUPP;
if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
return -EOPNOTSUPP;

@@ -813,7 +813,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
lock_sock(sock->sk);
if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_sock;
goto out_rel_sock;
}
sk = sock->sk;
@@ -826,8 +826,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
out_sock:
out_rel_sock:
release_sock(sock->sk);
out_sock:
sockfd_put(sock);
return sk;
}
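The gtp hunks above split the error unwind into two labels so that each failure path only undoes what it has actually acquired: release_sock() runs only on paths that took the socket lock, while sockfd_put() runs on all of them. A generic, self-contained sketch of that staged-unwind pattern (names and resources are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_setup(int fail_early, int fail_late)
    {
        FILE *f;
        int err = 0;

        f = fopen("/dev/null", "r");
        if (!f)
            return -1;

        if (fail_early) {           /* nothing locked yet: skip the unlock */
            err = -1;
            goto out_file;
        }

        pthread_mutex_lock(&lock);
        if (fail_late) {            /* locked: unwind through the unlock first */
            err = -1;
            goto out_unlock;
        }

        /* ... normal work under the lock ... */

    out_unlock:
        pthread_mutex_unlock(&lock);
    out_file:
        fclose(f);
        return err;
    }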

@@ -169,7 +169,6 @@ struct rndis_device {
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
u16 rx_table[ITAB_NUM];
};
@@ -938,6 +937,8 @@ struct net_device_context {
u32 tx_table[VRSS_SEND_TAB_SIZE];
u16 rx_table[ITAB_NUM];
/* Ethtool settings */
u8 duplex;
u32 speed;

@@ -1659,7 +1659,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
indir[i] = rndis_dev->rx_table[i];
indir[i] = ndc->rx_table[i];
}
if (key)
@@ -1689,7 +1689,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
return -EINVAL;
for (i = 0; i < ITAB_NUM; i++)
rndis_dev->rx_table[i] = indir[i];
ndc->rx_table[i] = indir[i];
}
if (!key) {

@@ -767,6 +767,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
struct net_device_context *ndc = netdev_priv(ndev);
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
@@ -806,7 +807,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
itab[i] = rdev->rx_table[i];
itab[i] = ndc->rx_table[i];
/* Set hask key values */
keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
@@ -1305,6 +1306,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info)
{
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *ndc = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct ndis_recv_scale_cap rsscap;
@@ -1391,9 +1393,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
/* We will use the given number of channels if available. */
net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
for (i = 0; i < ITAB_NUM; i++)
rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
if (!netif_is_rxfh_configured(net)) {
for (i = 0; i < ITAB_NUM; i++)
ndc->rx_table[i] = ethtool_rxfh_indir_default(
i, net_device->num_chn);
}
atomic_set(&net_device->open_chn, 1);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

@@ -259,7 +259,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct net_device *src,
enum macvlan_mode mode)
{
const struct ethhdr *eth = eth_hdr(skb);
const struct ethhdr *eth = skb_eth_hdr(skb);
const struct macvlan_dev *vlan;
struct sk_buff *nskb;
unsigned int i;

@@ -511,7 +511,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
}
} else {
netdev_warn(dev->net,
"Failed to read stat ret = 0x%x", ret);
"Failed to read stat ret = %d", ret);
}
kfree(stats);
@@ -2724,11 +2724,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
static int lan78xx_linearize(struct sk_buff *skb)
{
return skb_linearize(skb);
}
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
@@ -2740,8 +2735,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
return NULL;
}
if (lan78xx_linearize(skb) < 0)
if (skb_linearize(skb)) {
dev_kfree_skb_any(skb);
return NULL;
}
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

@@ -2542,7 +2542,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = &rt->dst;
skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
@@ -2582,7 +2582,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),

@@ -953,59 +953,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
switch (*pos) {
case WLAN_EID_SUPP_RATES:
if (pos[1] > 32)
return;
sta_ptr->tdls_cap.rates_len = pos[1];
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
if (pos[1] > 32)
return;
basic = sta_ptr->tdls_cap.rates_len;
if (pos[1] > 32 - basic)
return;
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
sta_ptr->tdls_cap.rates_len += pos[1];
break;
case WLAN_EID_HT_CAPABILITY:
memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
return;
if (pos[1] != sizeof(struct ieee80211_ht_cap))
return;
/* copy the ie's value into ht_capb*/
memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
sizeof(struct ieee80211_ht_cap));
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
if (pos > end -
sizeof(struct ieee80211_ht_operation) - 2)
return;
if (pos[1] != sizeof(struct ieee80211_ht_operation))
return;
/* copy the ie's value into ht_oper*/
memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
if (pos > end - 3)
return;
if (pos[1] != 1)
return;
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
if (pos > end - sizeof(struct ieee_types_header))
return;
if (pos[1] < sizeof(struct ieee_types_header))
return;
if (pos[1] > 8)
return;
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], 8));
break;
case WLAN_EID_RSN:
if (pos > end - sizeof(struct ieee_types_header))
return;
if (pos[1] < sizeof(struct ieee_types_header))
return;
if (pos[1] > IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header))
return;
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
if (pos > end - 3)
return;
if (pos[1] != 1)
return;
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
if (priv->adapter->is_hw_11ac_capable)
memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
if (priv->adapter->is_hw_11ac_capable) {
if (pos > end -
sizeof(struct ieee80211_vht_operation) - 2)
return;
if (pos[1] !=
sizeof(struct ieee80211_vht_operation))
return;
/* copy the ie's value into vhtoper*/
memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
sizeof(struct ieee80211_vht_operation));
}
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
if (pos > end -
sizeof(struct ieee80211_vht_cap) - 2)
return;
if (pos[1] != sizeof(struct ieee80211_vht_cap))
return;
/* copy the ie's value into vhtcap*/
memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
sizeof(struct ieee80211_vht_cap));
sta_ptr->is_11ac_enabled = 1;
}
break;
case WLAN_EID_AID:
if (priv->adapter->is_hw_11ac_capable)
if (priv->adapter->is_hw_11ac_capable) {
if (pos > end - 4)
return;
if (pos[1] != 2)
return;
sta_ptr->tdls_cap.aid =
get_unaligned_le16((pos + 2));
}
break;
default:
break;
}
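The mwifiex hunks above are the heap-overflow fix itself: each element's claimed length is now checked against the remaining frame and the destination structure before anything is copied. A simplified, self-contained sketch of that kind of bounds-checked element walk (a generic illustration, not the mwifiex parser; the fixed size and element ID are stand-ins):

    #include <stdint.h>
    #include <string.h>

    struct ht_cap_like { uint8_t data[26]; };   /* stand-in fixed-size IE body */

    static int parse_ies(const uint8_t *buf, size_t len, struct ht_cap_like *out)
    {
        const uint8_t *pos = buf;
        const uint8_t *end = buf + len;

        /* each element is: id (1 byte), length (1 byte), value (length bytes) */
        while (end - pos >= 2) {
            uint8_t id = pos[0];
            uint8_t elen = pos[1];

            if (elen > end - pos - 2)
                return -1;                      /* element overruns the buffer */

            if (id == 45 /* HT capability */) {
                if (elen != sizeof(out->data))
                    return -1;                  /* malformed element */
                memcpy(out->data, pos + 2, sizeof(out->data));
            }
            pos += 2 + elen;
        }
        return 0;
    }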

@@ -815,7 +815,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
if (err) {
dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
err, &res_0->start);
goto out_cpuhp_err;
return err;
}
err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
@@ -834,8 +834,6 @@ static int smmu_pmu_probe(struct platform_device *pdev)
out_unregister:
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
out_cpuhp_err:
put_cpu();
return err;
}

@@ -1088,60 +1088,52 @@ SSSF_PIN_DECL(AF15, GPIOV7, LPCSMI, SIG_DESC_SET(SCU434, 15));
#define AB7 176
SIG_EXPR_LIST_DECL_SESG(AB7, LAD0, LPC, SIG_DESC_SET(SCU434, 16),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16));
PIN_DECL_2(AB7, GPIOW0, LAD0, ESPID0);
#define AB8 177
SIG_EXPR_LIST_DECL_SESG(AB8, LAD1, LPC, SIG_DESC_SET(SCU434, 17),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17));
PIN_DECL_2(AB8, GPIOW1, LAD1, ESPID1);
#define AC8 178
SIG_EXPR_LIST_DECL_SESG(AC8, LAD2, LPC, SIG_DESC_SET(SCU434, 18),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18));
PIN_DECL_2(AC8, GPIOW2, LAD2, ESPID2);
#define AC7 179
SIG_EXPR_LIST_DECL_SESG(AC7, LAD3, LPC, SIG_DESC_SET(SCU434, 19),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19));
PIN_DECL_2(AC7, GPIOW3, LAD3, ESPID3);
#define AE7 180
SIG_EXPR_LIST_DECL_SESG(AE7, LCLK, LPC, SIG_DESC_SET(SCU434, 20),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20));
PIN_DECL_2(AE7, GPIOW4, LCLK, ESPICK);
#define AF7 181
SIG_EXPR_LIST_DECL_SESG(AF7, LFRAME, LPC, SIG_DESC_SET(SCU434, 21),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21));
PIN_DECL_2(AF7, GPIOW5, LFRAME, ESPICS);
#define AD7 182
SIG_EXPR_LIST_DECL_SESG(AD7, LSIRQ, LSIRQ, SIG_DESC_SET(SCU434, 22),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22));
PIN_DECL_2(AD7, GPIOW6, LSIRQ, ESPIALT);
FUNC_GROUP_DECL(LSIRQ, AD7);
FUNC_GROUP_DECL(ESPIALT, AD7);
#define AD8 183
SIG_EXPR_LIST_DECL_SESG(AD8, LPCRST, LPC, SIG_DESC_SET(SCU434, 23),
SIG_DESC_CLEAR(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23),
SIG_DESC_SET(SCU510, 6));
SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23));
PIN_DECL_2(AD8, GPIOW7, LPCRST, ESPIRST);
FUNC_GROUP_DECL(LPC, AB7, AB8, AC8, AC7, AE7, AF7, AD8);

@@ -85,7 +85,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin)
const struct pinmux_ops *ops = pctldev->desc->pmxops;
/* Can't inspect pin, assume it can be used */
if (!desc)
if (!desc || !ops)
return true;
if (ops->strict && desc->mux_usecount)

@@ -95,7 +95,7 @@ static struct gpiod_lookup_table gpios_led_table = {
NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
NULL, 2, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_REG_SIMSWAP,
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP,
NULL, 3, GPIO_ACTIVE_LOW),
}
};

@@ -1938,8 +1938,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
regulator = create_regulator(rdev, dev, id);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
put_device(&rdev->dev);
module_put(rdev->owner);
put_device(&rdev->dev);
return regulator;
}
@@ -2063,13 +2063,13 @@ static void _regulator_put(struct regulator *regulator)
rdev->open_count--;
rdev->exclusive = 0;
put_device(&rdev->dev);
regulator_unlock(rdev);
kfree_const(regulator->supply_name);
kfree(regulator);
module_put(rdev->owner);
put_device(&rdev->dev);
}
/**
@@ -4996,6 +4996,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
struct regulator_dev *rdev;
bool dangling_cfg_gpiod = false;
bool dangling_of_gpiod = false;
bool reg_device_fail = false;
struct device *dev;
int ret, i;
@@ -5181,7 +5182,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
reg_device_fail = true;
goto unset_supplies;
}
@@ -5212,7 +5213,10 @@ wash:
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
kfree(rdev);
if (reg_device_fail)
put_device(&rdev->dev);
else
kfree(rdev);
kfree(config);
rinse:
if (dangling_cfg_gpiod)
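The regulator_register() hunks above change the failure handling once device_register() has been attempted: at that point the struct device is owned by the device core's refcount and must be released with put_device() so its release callback frees it, while the earlier, never-registered case still frees the allocation directly. A rough userspace sketch of that ownership rule (all names invented):

    #include <stdlib.h>

    struct obj {
        int refs;
        void (*release)(struct obj *);
    };

    static void obj_free(struct obj *o)
    {
        free(o);
    }

    static struct obj *obj_create(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        if (o) {
            o->refs = 1;             /* the creator holds the first reference */
            o->release = obj_free;
        }
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)
            o->release(o);           /* the "core" frees it via its callback */
    }

    static void cleanup(struct obj *o, int handed_to_core)
    {
        if (handed_to_core)
            obj_put(o);              /* analogue of put_device() */
        else
            free(o);                 /* never handed over: plain free is fine */
    }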
