This is the 5.4.54 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl8hMPEACgkQONu9yGCS
 aT4XTw/+KgIRJRuJb3FKXbV/ranj4gSLiOoDq86W7L6SNYqDoz/sqMmGhe0ttiu8
 38xCl6YrPQQqA91w9fiy6Y6DcY15XRtUTWIbLtVO5GPwTNiZHskfWZ+KwjNcM1B9
 q/fSR/791nJ1lpNo6XjPws7sbVUNdX0UbTIlLCEZayoxvHdar6u/XmNNUTPwbY01
 kD5EmVVv3tbrkhiDQ+MXqEfsF7cae85SbkUVGRrGm7uL/s6PB2NwRGEn1XbN/ZKJ
 jmuRc0T5nwbB9rh2TV8P1c8PP3iPrpRdWFbaTGhcxCkJh2gzwqVnRBIfpWNQo2Fz
 LuLj5g3BfzSx5xQcWkjZrcn72zobZLF9Bvx6NIaYm1Ly3vtR9mjYmqOcRtv3T0sJ
 xi+ObxI73dZhvv0yeJTFbAquQ3Js+ohzNpSr8zJcJp4WX7XDTd/dE+u3uOmSBMoE
 UlD2cWf/HIaYZcLLIOqSIM0uHmfTTc4KTgR2bMXzwEMhtResJpUG1BDa8/MOCDxX
 7UhgRmrZ99faOjNy5jrdI4x5MwTMQs/znMtZ8Y6OlE9RZIogHON8/mTRGqLV0cRF
 Drw6XTfS6QOu55vvddqjmie4J8/aFvhWVw4A90n0DmXH8fY+XqFI8mCguBQw/mW3
 xm4lSSCgRFuQ2YFctXjgwD5MBaCDKAW2rp6iwZmC8sONYeoVCfM=
 =IZ2W
 -----END PGP SIGNATURE-----

Merge 5.4.54 into android11-5.4

Changes in 5.4.54
	soc: qcom: rpmh: Dirt can only make you dirtier, not cleaner
	gpio: arizona: handle pm_runtime_get_sync failure case
	gpio: arizona: put pm_runtime in case of failure
	pinctrl: amd: fix npins for uart0 in kerncz_groups
	mac80211: allow rx of mesh eapol frames with default rx key
	scsi: scsi_transport_spi: Fix function pointer check
	xtensa: fix __sync_fetch_and_{and,or}_4 declarations
	xtensa: update *pos in cpuinfo_op.next
	scsi: mpt3sas: Fix unlock imbalance
	drivers/net/wan/lapbether: Fixed the value of hard_header_len
	ALSA: hda/hdmi: fix failures at PCM open on Intel ICL and later
	net: sky2: initialize return of gm_phy_read
	drm/nouveau/i2c/g94-: increase NV_PMGR_DP_AUXCTL_TRANSACTREQ timeout
	scsi: mpt3sas: Fix error returns in BRM_status_show
	scsi: dh: Add Fujitsu device to devinfo and dh lists
	dm: use bio_uninit instead of bio_disassociate_blkg
	drivers/firmware/psci: Fix memory leakage in alloc_init_cpu_groups()
	fuse: fix weird page warning
	irqdomain/treewide: Keep firmware node unconditionally allocated
	ARM: dts: imx6qdl-gw551x: Do not use 'simple-audio-card,dai-link'
	ARM: dts: imx6qdl-gw551x: fix audio SSI
	dmabuf: use spinlock to access dmabuf->name
	drm/amd/display: Check DMCU Exists Before Loading
	SUNRPC reverting d03727b248d0 ("NFSv4 fix CLOSE not waiting for direct IO compeletion")
	btrfs: reloc: fix reloc root leak and NULL pointer dereference
	btrfs: reloc: clear DEAD_RELOC_TREE bit for orphan roots to prevent runaway balance
	uprobes: Change handle_swbp() to send SIGTRAP with si_code=SI_KERNEL, to fix GDB regression
	ALSA: hda/realtek: Fixed ALC298 sound bug by adding quirk for Samsung Notebook Pen S
	ALSA: info: Drop WARN_ON() from buffer NULL sanity check
	ASoC: rt5670: Correct RT5670_LDO_SEL_MASK
	btrfs: fix double free on ulist after backref resolution failure
	btrfs: fix mount failure caused by race with umount
	btrfs: fix page leaks after failure to lock page for delalloc
	bnxt_en: Fix race when modifying pause settings.
	bnxt_en: Fix completion ring sizing with TPA enabled.
	fpga: dfl: pci: reduce the scope of variable 'ret'
	fpga: dfl: fix bug in port reset handshake
	hippi: Fix a size used in a 'pci_free_consistent()' in an error handling path
	vsock/virtio: annotate 'the_virtio_vsock' RCU pointer
	ax88172a: fix ax88172a_unbind() failures
	RDMA/mlx5: Use xa_lock_irq when access to SRQ table
	ASoC: Intel: bytcht_es8316: Add missed put_device()
	net: dp83640: fix SIOCSHWTSTAMP to update the struct with actual configuration
	ieee802154: fix one possible memleak in adf7242_probe
	drm: sun4i: hdmi: Fix inverted HPD result
	net: smc91x: Fix possible memory leak in smc_drv_probe()
	bonding: check error value of register_netdevice() immediately
	mlxsw: destroy workqueue when trap_register in mlxsw_emad_init
	ionic: use offset for ethtool regs data
	ionic: fix up filter locks and debug msgs
	net: ag71xx: add missed clk_disable_unprepare in error path of probe
	net: hns3: fix error handling for desc filling
	net: dsa: microchip: call phy_remove_link_mode during probe
	netdevsim: fix unbalaced locking in nsim_create()
	qed: suppress "don't support RoCE & iWARP" flooding on HW init
	qed: suppress false-positives interrupt error messages on HW init
	ipvs: fix the connection sync failed in some cases
	net: ethernet: ave: Fix error returns in ave_init
	Revert "PCI/PM: Assume ports without DLL Link Active train links in 100 ms"
	nfsd4: fix NULL dereference in nfsd/clients display code
	enetc: Remove the mdio bus on PF probe bailout
	i2c: rcar: always clear ICSAR to avoid side effects
	i2c: i2c-qcom-geni: Fix DMA transfer race
	bonding: check return value of register_netdevice() in bond_newlink()
	geneve: fix an uninitialized value in geneve_changelink()
	serial: exar: Fix GPIO configuration for Sealevel cards based on XR17V35X
	scripts/decode_stacktrace: strip basepath from all paths
	scripts/gdb: fix lx-symbols 'gdb.error' while loading modules
	HID: i2c-hid: add Mediacom FlexBook edge13 to descriptor override
	HID: alps: support devices with report id 2
	HID: steam: fixes race in handling device list.
	HID: apple: Disable Fn-key key-re-mapping on clone keyboards
	dmaengine: tegra210-adma: Fix runtime PM imbalance on error
	Input: add `SW_MACHINE_COVER`
	ARM: dts: n900: remove mmc1 card detect gpio
	spi: mediatek: use correct SPI_CFG2_REG MACRO
	regmap: dev_get_regmap_match(): fix string comparison
	hwmon: (aspeed-pwm-tacho) Avoid possible buffer overflow
	dmaengine: fsl-edma: fix wrong tcd endianness for big-endian cpu
	dmaengine: ioat setting ioat timeout as module parameter
	Input: synaptics - enable InterTouch for ThinkPad X1E 1st gen
	Input: elan_i2c - only increment wakeup count on touch
	usb: dwc3: pci: add support for the Intel Tiger Lake PCH -H variant
	usb: dwc3: pci: add support for the Intel Jasper Lake
	usb: gadget: udc: gr_udc: fix memleak on error handling path in gr_ep_init()
	usb: cdns3: ep0: fix some endian issues
	usb: cdns3: trace: fix some endian issues
	hwmon: (adm1275) Make sure we are reading enough data for different chips
	drm/amdgpu/gfx10: fix race condition for kiq
	drm/amdgpu: fix preemption unit test
	hwmon: (nct6775) Accept PECI Calibration as temperature source for NCT6798D
	platform/x86: ISST: Add new PCI device ids
	platform/x86: asus-wmi: allow BAT1 battery name
	hwmon: (scmi) Fix potential buffer overflow in scmi_hwmon_probe()
	ALSA: hda/realtek - fixup for yet another Intel reference board
	drivers/perf: Fix kernel panic when rmmod PMU modules during perf sampling
	arm64: Use test_tsk_thread_flag() for checking TIF_SINGLESTEP
	x86: math-emu: Fix up 'cmp' insn for clang ias
	asm-generic/mmiowb: Allow mmiowb_set_pending() when preemptible()
	drivers/perf: Prevent forced unbinding of PMU drivers
	RISC-V: Upgrade smp_mb__after_spinlock() to iorw,iorw
	binder: Don't use mmput() from shrinker function.
	usb: xhci-mtk: fix the failure of bandwidth allocation
	usb: xhci: Fix ASM2142/ASM3142 DMA addressing
	Revert "cifs: Fix the target file was deleted when rename failed."
	iwlwifi: mvm: don't call iwl_mvm_free_inactive_queue() under RCU
	tty: xilinx_uartps: Really fix id assignment
	staging: wlan-ng: properly check endpoint types
	staging: comedi: addi_apci_1032: check INSN_CONFIG_DIGITAL_TRIG shift
	staging: comedi: ni_6527: fix INSN_CONFIG_DIGITAL_TRIG support
	staging: comedi: addi_apci_1500: check INSN_CONFIG_DIGITAL_TRIG shift
	staging: comedi: addi_apci_1564: check INSN_CONFIG_DIGITAL_TRIG shift
	serial: tegra: fix CREAD handling for PIO
	serial: 8250: fix null-ptr-deref in serial8250_start_tx()
	serial: 8250_mtk: Fix high-speed baud rates clamping
	/dev/mem: Add missing memory barriers for devmem_inode
	fbdev: Detect integer underflow at "struct fbcon_ops"->clear_margins.
	vt: Reject zero-sized screen buffer size.
	Makefile: Fix GCC_TOOLCHAIN_DIR prefix for Clang cross compilation
	mm/mmap.c: close race between munmap() and expand_upwards()/downwards()
	mm/memcg: fix refcount error while moving and swapping
	mm: memcg/slab: fix memory leak at non-root kmem_cache destroy
	khugepaged: fix null-pointer dereference due to race
	io-mapping: indicate mapping failure
	mmc: sdhci-of-aspeed: Fix clock divider calculation
	drm/amdgpu: Fix NULL dereference in dpm sysfs handlers
	drm/amd/powerplay: fix a crash when overclocking Vega M
	parisc: Add atomic64_set_release() define to avoid CPU soft lockups
	x86, vmlinux.lds: Page-align end of ..page_aligned sections
	ASoC: rt5670: Add new gpio1_is_ext_spk_en quirk and enable it on the Lenovo Miix 2 10
	ASoC: qcom: Drop HAS_DMA dependency to fix link failure
	ASoC: topology: fix kernel oops on route addition error
	ASoC: topology: fix tlvs in error handling for widget_dmixer
	dm integrity: fix integrity recalculation that is improperly skipped
	ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb
	ath9k: Fix regression with Atheros 9271
	Linux 5.4.54

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic2a25ecd02cfc4a44ec53c73e200b72cd7d930ba
commit 261a54f37d by Greg Kroah-Hartman, 2020-07-29 13:27:01 +02:00
167 changed files with 920 additions and 407 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 53
SUBLEVEL = 54
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@ -532,7 +532,7 @@ ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)

View File

@ -105,19 +105,16 @@
sound-digital {
compatible = "simple-audio-card";
simple-audio-card,name = "tda1997x-audio";
simple-audio-card,format = "i2s";
simple-audio-card,bitclock-master = <&sound_codec>;
simple-audio-card,frame-master = <&sound_codec>;
simple-audio-card,dai-link@0 {
format = "i2s";
sound_cpu: simple-audio-card,cpu {
sound-dai = <&ssi1>;
};
cpu {
sound-dai = <&ssi2>;
};
codec {
bitclock-master;
frame-master;
sound-dai = <&hdmi_receiver>;
};
sound_codec: simple-audio-card,codec {
sound-dai = <&hdmi_receiver>;
};
};
};

View File

@ -105,6 +105,14 @@
linux,code = <SW_FRONT_PROXIMITY>;
linux,can-disable;
};
machine_cover {
label = "Machine Cover";
gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
linux,input-type = <EV_SW>;
linux,code = <SW_MACHINE_COVER>;
linux,can-disable;
};
};
isp1707: isp1707 {
@ -814,10 +822,6 @@
pinctrl-0 = <&mmc1_pins>;
vmmc-supply = <&vmmc1>;
bus-width = <4>;
/* For debugging, it is often good idea to remove this GPIO.
It means you can remove back cover (to reboot by removing
battery) and still use the MMC card. */
cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
};
/* most boards use vaux3, only some old versions use vmmc2 instead */

View File

@ -396,14 +396,14 @@ void user_rewind_single_step(struct task_struct *task)
* If single step is active for this thread, then set SPSR.SS
* to 1 to avoid returning to the active-pending state.
*/
if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);
void user_fastforward_single_step(struct task_struct *task)
{
if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
clear_regs_spsr_ss(task_pt_regs(task));
}

View File

@ -444,9 +444,10 @@ static int bridge_probe(struct platform_device *pdev)
return -ENOMEM;
domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
&bridge_domain_ops, NULL);
irq_domain_free_fwnode(fn);
if (!domain)
if (!domain) {
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
pci_set_flags(PCI_PROBE_ONLY);

View File

@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
#define atomic64_set_release(v, i) atomic64_set((v), (i))
static __inline__ s64
atomic64_read(const atomic64_t *v)
{

View File

@ -58,8 +58,16 @@ do { \
* The AQ/RL pair provides a RCpc critical section, but there's not really any
* way we can take advantage of that here because the ordering is only enforced
* on that one lock. Thus, we're just doing a full fence.
*
* Since we allow writeX to be called from preemptive regions we need at least
* an "o" in the predecessor set to ensure device writes are visible before the
* task is marked as available for scheduling on a new hart. While I don't see
* any concrete reason we need a full IO fence, it seems safer to just upgrade
* this in order to avoid any IO crossing a scheduling boundary. In both
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)
#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
#include <asm-generic/barrier.h>

View File

@ -2329,12 +2329,12 @@ static int mp_irqdomain_create(int ioapic)
ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
(void *)(long)ioapic);
/* Release fw handle if it was allocated above */
if (!cfg->dev)
irq_domain_free_fwnode(fn);
if (!ip->irqdomain)
if (!ip->irqdomain) {
/* Release fw handle if it was allocated above */
if (!cfg->dev)
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
ip->irqdomain->parent = parent;

View File

@ -262,12 +262,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
msi_default_domain =
pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
parent);
irq_domain_free_fwnode(fn);
}
if (!msi_default_domain)
if (!msi_default_domain) {
irq_domain_free_fwnode(fn);
pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
else
} else {
msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
}
}
#ifdef CONFIG_IRQ_REMAP
@ -300,7 +301,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
if (!fn)
return NULL;
d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
irq_domain_free_fwnode(fn);
if (!d)
irq_domain_free_fwnode(fn);
return d;
}
#endif
@ -363,7 +365,8 @@ static struct irq_domain *dmar_get_irq_domain(void)
if (fn) {
dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
x86_vector_domain);
irq_domain_free_fwnode(fn);
if (!dmar_domain)
irq_domain_free_fwnode(fn);
}
out:
mutex_unlock(&dmar_lock);
@ -488,7 +491,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
}
d = msi_create_irq_domain(fn, domain_info, parent);
irq_domain_free_fwnode(fn);
if (!d) {
irq_domain_free_fwnode(fn);
kfree(domain_info);
}
return d;
}

View File

@ -701,7 +701,6 @@ int __init arch_early_irq_init(void)
x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
NULL);
BUG_ON(x86_vector_domain == NULL);
irq_domain_free_fwnode(fn);
irq_set_default_host(x86_vector_domain);
arch_init_msi_domain(x86_vector_domain);

View File

@ -368,6 +368,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
. = ALIGN(PAGE_SIZE);
*(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);

View File

@ -209,7 +209,7 @@ sqrt_stage_2_finish:
#ifdef PARANOID
/* It should be possible to get here only if the arg is ffff....ffff */
cmp $0xffffffff,FPU_fsqrt_arg_1
cmpl $0xffffffff,FPU_fsqrt_arg_1
jnz sqrt_stage_2_error
#endif /* PARANOID */

View File

@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void)
goto out;
uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
irq_domain_free_fwnode(fn);
if (uv_domain)
uv_domain->parent = x86_vector_domain;
else
irq_domain_free_fwnode(fn);
out:
mutex_unlock(&uv_lock);

View File

@ -716,7 +716,8 @@ c_start(struct seq_file *f, loff_t *pos)
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
return NULL;
++*pos;
return c_start(f, pos);
}
static void

View File

@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void)
}
EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
{
BUG();
}
EXPORT_SYMBOL(__sync_fetch_and_and_4);
unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
{
BUG();
}

View File

@ -948,7 +948,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
up_read(&mm->mmap_sem);
mmput(mm);
mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);

View File

@ -1363,7 +1363,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
return (*r)->name == data;
return !strcmp((*r)->name, data);
else
return 1;
}

View File

@ -814,7 +814,8 @@ static struct inode *devmem_inode;
#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
struct inode *inode = READ_ONCE(devmem_inode);
/* pairs with smp_store_release() in devmem_init_inode() */
struct inode *inode = smp_load_acquire(&devmem_inode);
/*
* Check that the initialization has completed. Losing the race
@ -1028,8 +1029,11 @@ static int devmem_init_inode(void)
return rc;
}
/* publish /dev/mem initialized */
WRITE_ONCE(devmem_inode, inode);
/*
* Publish /dev/mem initialized.
* Pairs with smp_load_acquire() in revoke_devmem().
*/
smp_store_release(&devmem_inode, inode);
return 0;
}

View File

@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
mutex_lock(&dmabuf->lock);
spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
mutex_unlock(&dmabuf->lock);
spin_unlock(&dmabuf->name_lock);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@ -338,8 +338,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
kfree(name);
goto out_unlock;
}
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);
out_unlock:
mutex_unlock(&dmabuf->lock);
@ -402,10 +404,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
mutex_lock(&dmabuf->lock);
spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
mutex_unlock(&dmabuf->lock);
spin_unlock(&dmabuf->name_lock);
}
static const struct file_operations dma_buf_fops = {
@ -537,6 +539,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->size = exp_info->size;
dmabuf->exp_name = exp_info->exp_name;
dmabuf->owner = exp_info->owner;
spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

View File

@ -347,26 +347,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
/*
* TCD parameters are stored in struct fsl_edma_hw_tcd in little
* endian format. However, we need to load the TCD registers in
* big- or little-endian obeying the eDMA engine model endian.
* big- or little-endian obeying the eDMA engine model endian,
* and this is performed from specific edma_write functions
*/
edma_writew(edma, 0, &regs->tcd[ch].csr);
edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
edma_writel(edma, (s32)tcd->dlast_sga,
&regs->tcd[ch].dlast_sga);
edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}
static inline

View File

@ -26,6 +26,18 @@
#include "../dmaengine.h"
int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
"set ioat completion timeout [msec] (default 200 [msec])");
int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
"set ioat idel timeout [msec] (default 2000 [msec])");
#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
static char *chanerr_str[] = {
"DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",

View File

@ -99,8 +99,6 @@ struct ioatdma_chan {
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100)
struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma;

View File

@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
ret = pm_runtime_get_sync(tdc2dev(tdc));
if (ret < 0) {
pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
goto rpm_disable;
}
ret = tegra_adma_init(tdma);
if (ret)

View File

@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
GFP_KERNEL);
if (!cpu_groups)
if (!cpu_groups) {
free_cpumask_var(tmp);
return -ENOMEM;
}
cpumask_copy(tmp, cpu_online_mask);
@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
topology_core_cpumask(cpumask_any(tmp));
if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
free_cpumask_var(tmp);
free_cpu_groups(num_groups, &cpu_groups);
return -ENOMEM;
}

View File

@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev)
* on this port and minimum soft reset pulse width has elapsed.
* Driver polls port_soft_reset_ack to determine if reset done by HW.
*/
if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
dev_err(&pdev->dev, "timeout, fail to reset device\n");
return -ETIMEDOUT;

View File

@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
struct dfl_fpga_cdev *cdev = drvdata->cdev;
int ret = 0;
if (!num_vfs) {
/*
@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
dfl_fpga_cdev_config_ports_pf(cdev);
} else {
int ret;
/*
* before enable SRIOV, put released ports into VF access mode
* first of all.

View File

@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
pm_runtime_put_autosuspend(chip->parent);
return ret;
}
@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
if (ret < 0) {
dev_err(chip->parent, "Failed to drop cache: %d\n",
ret);
pm_runtime_put_autosuspend(chip->parent);
return ret;
}
ret = regmap_read(arizona->regmap, reg, &val);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_autosuspend(chip->parent);
return ret;
}
pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
pm_runtime_put(chip->parent);
return ret;
}
}

View File

@ -990,27 +990,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
struct amdgpu_job *job;
struct drm_sched_job *s_job;
struct drm_sched_job *s_job, *tmp;
uint32_t preempt_seq;
struct dma_fence *fence, **ptr;
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct drm_gpu_scheduler *sched = &ring->sched;
bool preempted = true;
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
return;
preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
if (preempt_seq <= atomic_read(&drv->last_seq))
return;
if (preempt_seq <= atomic_read(&drv->last_seq)) {
preempted = false;
goto no_preempt;
}
preempt_seq &= drv->num_fences_mask;
ptr = &drv->fences[preempt_seq];
fence = rcu_dereference_protected(*ptr, 1);
no_preempt:
spin_lock(&sched->job_list_lock);
list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&s_job->node);
sched->ops->free_job(s_job);
continue;
}
job = to_amdgpu_job(s_job);
if (job->fence == fence)
if (preempted && job->fence == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}

View File

@ -691,8 +691,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@ -883,8 +882,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@ -1300,8 +1298,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret) {
count = -EINVAL;

View File

@ -4683,12 +4683,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
/* assert preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, false);
@ -4699,6 +4704,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
++ring->trail_seq);
amdgpu_ring_commit(kiq_ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
/* poll the trailing fence */
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->trail_seq ==

View File

@ -928,9 +928,14 @@ static int dm_late_init(void *handle)
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
struct dmcu *dmcu = NULL;
bool ret;
if (!adev->dm.fw_dmcu)
return detect_mst_link_for_all_connectors(adev->ddev);
dmcu = adev->dm.dc->res_pool->dmcu;
for (i = 0; i < 16; i++)
linear_lut[i] = 0xFFFF * i / 15;

View File

@ -642,9 +642,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@ -652,8 +649,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
else
else {
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
}
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;

View File

@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
/* transaction request, wait up to 1ms for it to complete */
/* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
timeout = 1000;
timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
udelay(1);

View File

@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
/* transaction request, wait up to 1ms for it to complete */
/* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
timeout = 1000;
timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + base);
udelay(1);

View File

@ -263,7 +263,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
unsigned long reg;
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
if (reg & SUN4I_HDMI_HPD_HIGH) {
if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}

View File

@ -25,6 +25,7 @@
#define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */
#define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */
#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */
#define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */
#define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */
@ -368,6 +369,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
case U1_FEATURE_REPORT_ID:
break;
case U1_ABSOLUTE_REPORT_ID:
case U1_ABSOLUTE_REPORT_ID_SECD:
for (i = 0; i < hdata->max_fingers; i++) {
u8 *contact = &data[i * 5];

View File

@ -54,6 +54,7 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
unsigned int fn_found;
DECLARE_BITMAP(pressed_numlock, KEY_CNT);
};
@ -339,12 +340,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
asc->fn_found = true;
apple_setup_input(hi->input);
return 1;
}
@ -371,6 +375,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
static int apple_input_configured(struct hid_device *hdev,
struct hid_input *hidinput)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
asc->quirks = 0;
}
return 0;
}
static int apple_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@ -585,6 +602,7 @@ static struct hid_driver apple_driver = {
.event = apple_event,
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
.input_configured = apple_input_configured,
};
module_hid_driver(apple_driver);

View File

@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam)
steam_battery_register(steam);
mutex_lock(&steam_devices_lock);
list_add(&steam->list, &steam_devices);
if (list_empty(&steam->list))
list_add(&steam->list, &steam_devices);
mutex_unlock(&steam_devices_lock);
}
@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam)
hid_info(steam->hdev, "Steam Controller '%s' disconnected",
steam->serial_no);
mutex_lock(&steam_devices_lock);
list_del(&steam->list);
list_del_init(&steam->list);
mutex_unlock(&steam_devices_lock);
steam->serial_no[0] = 0;
}
@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev,
mutex_init(&steam->mutex);
steam->quirks = id->driver_data;
INIT_WORK(&steam->work_connect, steam_work_connect_cb);
INIT_LIST_HEAD(&steam->list);
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {

View File

@ -373,6 +373,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Mediacom FlexBook edge 13",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"),
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Odys Winbook 13",
.matches = {

View File

@ -851,6 +851,8 @@ static int aspeed_create_fan(struct device *dev,
ret = of_property_read_u32(child, "reg", &pwm_port);
if (ret)
return ret;
if (pwm_port >= ARRAY_SIZE(pwm_port_params))
return -EINVAL;
aspeed_create_pwm_port(priv, (u8)pwm_port);
ret = of_property_count_u8_elems(child, "cooling-levels");

View File

@ -786,13 +786,13 @@ static const char *const nct6798_temp_label[] = {
"Agent1 Dimm1",
"BYTE_TEMP0",
"BYTE_TEMP1",
"",
"",
"PECI Agent 0 Calibration", /* undocumented */
"PECI Agent 1 Calibration", /* undocumented */
"",
"Virtual_TEMP"
};
#define NCT6798_TEMP_MASK 0x8fff0ffe
#define NCT6798_TEMP_MASK 0xbfff0ffe
#define NCT6798_VIRT_TEMP_MASK 0x80000c00
/* NCT6102D/NCT6106D specific data */

View File

@ -454,6 +454,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id);
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
@ -499,11 +500,16 @@ static int adm1275_probe(struct i2c_client *client,
"Device mismatch: Configured %s, detected %s\n",
id->name, mid->name);
config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
mid->driver_data == adm1293 || mid->driver_data == adm1294)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
config = config_read_fn(client, ADM1275_PMON_CONFIG);
if (config < 0)
return config;
device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG);
if (device_config < 0)
return device_config;

View File

@ -147,7 +147,7 @@ static enum hwmon_sensor_types scmi_types[] = {
[ENERGY] = hwmon_energy,
};
static u32 hwmon_attributes[] = {
static u32 hwmon_attributes[hwmon_max] = {
[hwmon_chip] = HWMON_C_REGISTER_TZ,
[hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
[hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,

View File

@ -368,7 +368,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_READ, m_param);
if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@ -376,6 +375,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_READ, m_param);
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
@ -409,7 +410,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@ -417,6 +417,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);

View File

@ -865,6 +865,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
synchronize_irq(priv->irq);
priv->slave = NULL;
@ -971,6 +972,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {

View File

@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
xa_lock(&table->array);
xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
xa_unlock_irq(&table->array);
return srq;
}

View File

@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
u8 hover_info = packet[ETP_HOVER_INFO_OFFSET];
bool contact_valid, hover_event;
pm_wakeup_event(&data->client->dev, 0);
hover_event = hover_info & 0x40;
for (i = 0; i < ETP_MAX_FINGERS; i++) {
contact_valid = tp_info & (1U << (3 + i));
@ -974,6 +976,8 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1];
int x, y;
pm_wakeup_event(&data->client->dev, 0);
if (!data->tp_input) {
dev_warn_once(&data->client->dev,
"received a trackpoint report while no trackpoint device has been created. Please report upstream.\n");
@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
static irqreturn_t elan_isr(int irq, void *dev_id)
{
struct elan_tp_data *data = dev_id;
struct device *dev = &data->client->dev;
int error;
u8 report[ETP_MAX_REPORT_LEN];
@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
if (error)
goto out;
pm_wakeup_event(dev, 0);
switch (report[ETP_REPORT_ID_OFFSET]) {
case ETP_REPORT_ID:
elan_report_absolute(data, report);
@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
elan_report_trackpoint(data, report);
break;
default:
dev_err(dev, "invalid report id data (%x)\n",
dev_err(&data->client->dev, "invalid report id data (%x)\n",
report[ETP_REPORT_ID_OFFSET]);
}

View File

@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0093", /* T480 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN0099", /* X1 Extreme 1st */
"LEN009b", /* T580 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */

View File

@ -4575,9 +4575,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
if (!fn)
return -ENOMEM;
iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
irq_domain_free_fwnode(fn);
if (!iommu->ir_domain)
if (!iommu->ir_domain) {
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
iommu->ir_domain->parent = arch_get_ir_parent_domain();
iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,

View File

@ -155,7 +155,10 @@ static int __init hyperv_prepare_irq_remapping(void)
0, IOAPIC_REMAPPING_ENTRY, fn,
&hyperv_ir_domain_ops, NULL);
irq_domain_free_fwnode(fn);
if (!ioapic_ir_domain) {
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
/*
* Hyper-V doesn't provide irq remapping function for

View File

@ -563,8 +563,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
0, INTR_REMAP_TABLE_ENTRIES,
fn, &intel_ir_domain_ops,
iommu);
irq_domain_free_fwnode(fn);
if (!iommu->ir_domain) {
irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
goto out_free_bitmap;
}

View File

@ -2298,7 +2298,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@ -2359,7 +2359,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
if (unlikely(dm_suspended(ic->ti)))
if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);

View File

@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
@ -1418,9 +1419,6 @@ static int __send_empty_flush(struct clone_info *ci)
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
bio_disassociate_blkg(ci->bio);
return 0;
}
@ -1608,6 +1606,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
ci.bio = bio;
@ -1682,6 +1681,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else {
struct dm_target_io *tio;
@ -2543,6 +2543,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@ -2865,7 +2866,9 @@ retry:
if (r)
goto out_unlock;
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@ -2962,7 +2965,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@ -3123,6 +3128,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
static int dm_post_suspending_md(struct mapped_device *md)
{
return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@ -3139,6 +3149,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti)
{
return dm_post_suspending_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));

View File

@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
for (div = 1; div < 256; div *= 2) {
for (div = 2; div < 256; div *= 2) {
if ((parent / div) <= clock)
break;
}

View File

@ -4864,15 +4864,19 @@ int bond_create(struct net *net, const char *name)
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
rtnl_unlock();
return res;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
rtnl_unlock();
if (res < 0)
free_netdev(bond_dev);
return res;
return 0;
}
static int __net_init bond_net_init(struct net *net)

View File

@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
return err;
err = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
if (!err) {
struct bonding *bond = netdev_priv(bond_dev);
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
}

View File

@ -976,23 +976,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
PORT_MIRROR_SNIFFER, false);
}
static void ksz9477_phy_setup(struct ksz_device *dev, int port,
struct phy_device *phy)
{
/* Only apply to port with PHY. */
if (port >= dev->phy_port_cnt)
return;
/* The MAC actually cannot run in 1000 half-duplex mode. */
phy_remove_link_mode(phy,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* PHY does not support gigabit. */
if (!(dev->features & GBIT_SUPPORT))
phy_remove_link_mode(phy,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
}
static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
{
bool gbit;
@ -1605,7 +1588,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.phy_setup = ksz9477_phy_setup,
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
@ -1619,7 +1601,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
int ksz9477_switch_register(struct ksz_device *dev)
{
return ksz_switch_register(dev, &ksz9477_dev_ops);
int ret, i;
struct phy_device *phydev;
ret = ksz_switch_register(dev, &ksz9477_dev_ops);
if (ret)
return ret;
for (i = 0; i < dev->phy_port_cnt; ++i) {
if (!dsa_is_user_port(dev->ds, i))
continue;
phydev = dsa_to_port(dev->ds, i)->slave->phydev;
/* The MAC actually cannot run in 1000 half-duplex mode. */
phy_remove_link_mode(phydev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* PHY does not support gigabit. */
if (!(dev->features & GBIT_SUPPORT))
phy_remove_link_mode(phydev,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
}
return ret;
}
EXPORT_SYMBOL(ksz9477_switch_register);

View File

@ -366,8 +366,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
if (dev->dev_ops->phy_setup)
dev->dev_ops->phy_setup(dev, port, phy);
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.

View File

@ -120,8 +120,6 @@ struct ksz_dev_ops {
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*phy_setup)(struct ksz_device *dev, int port,
struct phy_device *phy);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);

View File

@ -553,7 +553,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
if (IS_ERR(ag->mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
return PTR_ERR(ag->mdio_reset);
err = PTR_ERR(ag->mdio_reset);
goto mdio_err_put_clk;
}
mii_bus->name = "ag71xx_mdio";

View File

@ -3423,7 +3423,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
*/
void bnxt_set_ring_params(struct bnxt *bp)
{
u32 ring_size, rx_size, rx_space;
u32 ring_size, rx_size, rx_space, max_rx_cmpl;
u32 agg_factor = 0, agg_ring_size = 0;
/* 8 for CRC and VLAN */
@ -3479,7 +3479,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
max_rx_cmpl = bp->rx_ring_size;
/* MAX TPA needs to be added because TPA_START completions are
* immediately recycled, so the TPA completions are not bound by
* the RX ring size.
*/
if (bp->flags & BNXT_FLAG_TPA)
max_rx_cmpl += bp->max_tpa;
/* RX and TPA completions are 32-byte, all others are 16-byte */
ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
bp->cp_ring_size = ring_size;
bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);

View File

@ -1688,8 +1688,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
if (netif_running(dev))
if (netif_running(dev)) {
mutex_lock(&bp->link_lock);
rc = bnxt_hwrm_set_pause(bp);
mutex_unlock(&bp->link_lock);
}
return rc;
}

View File

@ -885,6 +885,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
return 0;
err_reg_netdev:
enetc_mdio_remove(pf);
enetc_of_put_phy(priv);
enetc_free_msix(priv);
err_alloc_msix:

View File

@ -77,6 +77,7 @@
((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
enum hns_desc_type {
DESC_TYPE_UNKNOWN,
DESC_TYPE_SKB,
DESC_TYPE_PAGE,
};

View File

@ -1292,6 +1292,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
unsigned int i;
for (i = 0; i < ring->desc_num; i++) {
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
memset(desc, 0, sizeof(*desc));
/* check if this is where we started */
if (ring->next_to_use == next_to_use_orig)
break;
@ -1299,6 +1303,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);
if (!ring->desc_cb[ring->next_to_use].dma)
continue;
/* unmap the descriptor dma address */
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
dma_unmap_single(dev,
@ -1313,6 +1320,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
ring->desc_cb[ring->next_to_use].length = 0;
ring->desc_cb[ring->next_to_use].dma = 0;
ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
}
}

View File

@ -203,7 +203,7 @@ io_error:
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
u16 v;
u16 v = 0;
__gm_phy_read(hw, port, reg, &v);
return v;
}

View File

@ -592,7 +592,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
if (err)
return err;
goto err_trap_register;
err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
if (err)
@ -604,6 +604,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
}

View File

@ -102,15 +102,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size);
memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
}
static int ionic_get_link_ksettings(struct net_device *netdev,

View File

@ -809,8 +809,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
if (f)
return 0;
netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
ctx.comp.rx_filter_add.filter_id);
netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
err = ionic_adminq_post_wait(lif, &ctx);
@ -839,6 +838,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return -ENOENT;
}
netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
addr, f->filter_id);
ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);
spin_unlock_bh(&lif->rx_filters.lock);
@ -847,9 +849,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
if (err)
return err;
netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
ctx.cmd.rx_filter_del.filter_id);
return 0;
}
@ -1291,13 +1290,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
};
int err;
netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
ctx.comp.rx_filter_add.filter_id);
return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}
@ -1322,8 +1319,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
return -ENOENT;
}
netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
vid, f->filter_id);
ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);

View File

@ -36,10 +36,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif)
spin_lock_init(&lif->rx_filters.lock);
spin_lock_bh(&lif->rx_filters.lock);
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
}
spin_unlock_bh(&lif->rx_filters.lock);
return 0;
}
@ -51,11 +53,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif)
struct hlist_node *tmp;
unsigned int i;
spin_lock_bh(&lif->rx_filters.lock);
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
head = &lif->rx_filters.by_id[i];
hlist_for_each_entry_safe(f, tmp, head, by_id)
ionic_rx_filter_free(lif, f);
}
spin_unlock_bh(&lif->rx_filters.lock);
}
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
@ -91,6 +95,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
f->rxq_index = rxq_index;
memcpy(&f->cmd, ac, sizeof(f->cmd));
netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);
INIT_HLIST_NODE(&f->by_hash);
INIT_HLIST_NODE(&f->by_id);

View File

@ -2073,8 +2073,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
DP_NOTICE(p_hwfn,
"Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
}

View File

@ -3092,7 +3092,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
}
/* Log and clear previous pglue_b errors if such exist */
qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
/* Enable the PF's internal FID_enable in the PXP */
rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,

View File

@ -256,9 +256,10 @@ out:
#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
bool hw_init)
{
char msg[256];
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
@ -272,22 +273,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
DP_NOTICE(p_hwfn,
"Illegal write by chip to [%08x:%08x] blocked.\n"
"Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
"Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
GET_FIELD(details,
PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
tmp,
GET_FIELD(tmp,
PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
GET_FIELD(tmp,
PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
GET_FIELD(tmp,
PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
snprintf(msg, sizeof(msg),
"Illegal write by chip to [%08x:%08x] blocked.\n"
"Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
"Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
addr_hi, addr_lo, details,
(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
!!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
tmp,
!!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
!!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
!!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));
if (hw_init)
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
else
DP_NOTICE(p_hwfn, "%s\n", msg);
}
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
@ -320,8 +322,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
}
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
if (tmp & PGLUE_ATTENTION_ICPL_VALID)
DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);
if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);
if (hw_init)
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
else
DP_NOTICE(p_hwfn, "%s\n", msg);
}
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
@ -360,7 +368,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}
#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)


@ -431,7 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
bool hw_init);
#endif


@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
"power", 0, 0, 100);
if (ret)
return ret;
goto out_free_netdev;
/*
* Optional reset GPIO configured? Minimum 100 ns reset needed
@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
"reset", 0, 0, 100);
if (ret)
return ret;
goto out_free_netdev;
/*
* Need to wait for optional EEPROM to load, max 750 us according


@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev)
ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
priv->pinmode_mask, priv->pinmode_val);
if (ret)
return ret;
goto out_reset_assert;
ave_global_reset(ndev);


@ -1615,11 +1615,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct geneve_dev *geneve = netdev_priv(dev);
enum ifla_geneve_df df = geneve->df;
struct geneve_sock *gs4, *gs6;
struct ip_tunnel_info info;
bool metadata;
bool use_udp6_rx_checksums;
enum ifla_geneve_df df;
bool ttl_inherit;
int err;


@ -1242,7 +1242,7 @@ static int rr_open(struct net_device *dev)
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
pci_free_consistent(pdev, sizeof(struct ring_ctrl),
pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}


@ -1262,7 +1262,7 @@ static int adf7242_probe(struct spi_device *spi)
WQ_MEM_RECLAIM);
if (unlikely(!lp->wqueue)) {
ret = -ENOMEM;
goto err_hw_init;
goto err_alloc_wq;
}
ret = adf7242_hw_init(lp);
@ -1294,6 +1294,8 @@ static int adf7242_probe(struct spi_device *spi)
return ret;
err_hw_init:
destroy_workqueue(lp->wqueue);
err_alloc_wq:
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);


@ -301,7 +301,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
rtnl_lock();
err = nsim_bpf_init(ns);
if (err)
goto err_free_netdev;
goto err_rtnl_unlock;
nsim_ipsec_init(ns);
@ -315,8 +315,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
err_rtnl_unlock:
rtnl_unlock();
err_free_netdev:
free_netdev(dev);
return ERR_PTR(err);
}


@ -1348,6 +1348,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V1;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
@ -1355,6 +1356,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V2;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@ -1362,6 +1364,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@ -1369,6 +1372,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;


@ -198,6 +198,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
ret = -EIO;
goto free;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);


@ -303,7 +303,6 @@ static void lapbeth_setup(struct net_device *dev)
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
dev->hard_header_len = 3;
dev->mtu = 1000;
dev->addr_len = 0;
}
@ -324,6 +323,14 @@ static int lapbeth_new_device(struct net_device *dev)
if (!ndev)
goto out;
/* When transmitting data:
* first this driver removes a pseudo header of 1 byte,
* then the lapb module prepends an LAPB header of at most 3 bytes,
* then this driver prepends a length field of 2 bytes,
* then the underlying Ethernet device prepends its own header.
*/
ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;


@ -643,9 +643,9 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
int ret;
if (!skb)
@ -685,14 +685,15 @@ resubmit:
return;
free:
kfree_skb(skb);
kfree(rx_buf);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
struct sk_buff *nskb;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
@ -732,11 +733,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
rx_buf->skb = nskb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, nskb, 1);
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
}
resubmit:
@ -750,6 +753,7 @@ resubmit:
return;
free:
kfree_skb(skb);
kfree(rx_buf);
urb->context = NULL;
}
@ -795,7 +799,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
init_usb_anchor(&hif_dev->mgmt_submitted);
for (i = 0; i < MAX_TX_URB_NUM; i++) {
tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
@ -832,8 +836,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
@ -841,6 +846,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
for (i = 0; i < MAX_RX_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -855,11 +866,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, skb);
ath9k_hif_usb_rx_cb, rx_buf);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
@ -885,6 +899,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@ -896,14 +912,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -918,11 +941,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, skb, 1);
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@ -948,6 +974,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}


@ -86,6 +86,11 @@ struct tx_buf {
struct list_head list;
};
struct rx_buf {
struct sk_buff *skb;
struct hif_device_usb *hif_dev;
};
#define HIF_USB_TX_STOP BIT(0)
#define HIF_USB_TX_FLUSH BIT(1)


@ -1184,17 +1184,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
iwl_mvm_change_queue_tid(mvm, i);
rcu_read_unlock();
if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
alloc_for_sta);
if (ret) {
rcu_read_unlock();
if (ret)
return ret;
}
}
rcu_read_unlock();
return free_queue;
}


@ -680,9 +680,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
x86_vector_domain);
irq_domain_free_fwnode(fn);
if (!vmd->irq_domain)
if (!vmd->irq_domain) {
irq_domain_free_fwnode(fn);
return -ENODEV;
}
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);


@ -4611,8 +4611,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
* pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
* @delay: Delay to wait after link has become active (in ms). Specify %0
* for no delay.
* @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
@ -4653,7 +4652,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
msleep(10);
timeout -= 10;
}
if (active && ret && delay)
if (active && ret)
msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
@ -4774,28 +4773,17 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
if (!pcie_downstream_port(dev))
return;
/*
* Per PCIe r5.0, sec 6.6.1, for downstream ports that support
* speeds > 5 GT/s, we must wait for link training to complete
* before the mandatory delay.
*
* We can only tell when link training completes via DLL Link
* Active, which is required for downstream ports that support
* speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common
* devices do not implement Link Active reporting even when it's
* required, so we'll check for that directly instead of checking
* the supported link speed. We assume devices without Link Active
* reporting can train in 100 ms regardless of speed.
*/
if (dev->link_active_reporting) {
pci_dbg(dev, "waiting for link to train\n");
if (!pcie_wait_for_link_delay(dev, true, 0)) {
if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
msleep(delay);
} else {
pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
return;
}
}
pci_dbg(child, "waiting %d ms to become accessible\n", delay);
msleep(delay);
if (!pci_device_is_present(child)) {
pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);


@ -1720,6 +1720,7 @@ static struct platform_driver cci_pmu_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = arm_cci_pmu_matches,
.suppress_bind_attrs = true,
},
.probe = cci_pmu_probe,
.remove = cci_pmu_remove,


@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = {
.driver = {
.name = "arm-ccn",
.of_match_table = arm_ccn_match,
.suppress_bind_attrs = true,
},
.probe = arm_ccn_probe,
.remove = arm_ccn_remove,


@ -759,6 +759,7 @@ static struct platform_driver dsu_pmu_driver = {
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(dsu_pmu_of_match),
.suppress_bind_attrs = true,
},
.probe = dsu_pmu_device_probe,
.remove = dsu_pmu_device_remove,


@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, smmu_pmu);
smmu_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = smmu_pmu_enable,
.pmu_disable = smmu_pmu_disable,
@ -860,6 +861,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev)
static struct platform_driver smmu_pmu_driver = {
.driver = {
.name = "arm-smmu-v3-pmcg",
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
.remove = smmu_pmu_remove,


@ -1228,6 +1228,7 @@ static struct platform_driver arm_spe_pmu_driver = {
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_spe_pmu_of_match),
.suppress_bind_attrs = true,
},
.probe = arm_spe_pmu_device_probe,
.remove = arm_spe_pmu_device_remove,


@ -451,6 +451,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
{
*pmu = (struct ddr_pmu) {
.pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
@ -645,6 +646,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.driver = {
.name = "imx-ddr-pmu",
.of_match_table = imx_ddr_pmu_dt_ids,
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
.remove = ddr_perf_remove,


@ -381,6 +381,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
ddrc_pmu->sccl_id, ddrc_pmu->index_id);
ddrc_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.event_init = hisi_uncore_pmu_event_init,
.pmu_enable = hisi_uncore_pmu_enable,
@ -419,6 +420,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = {
.driver = {
.name = "hisi_ddrc_pmu",
.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = hisi_ddrc_pmu_probe,
.remove = hisi_ddrc_pmu_remove,


@ -392,6 +392,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
hha_pmu->sccl_id, hha_pmu->index_id);
hha_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.event_init = hisi_uncore_pmu_event_init,
.pmu_enable = hisi_uncore_pmu_enable,
@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = {
.driver = {
.name = "hisi_hha_pmu",
.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = hisi_hha_pmu_probe,
.remove = hisi_hha_pmu_remove,


@ -382,6 +382,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
l3c_pmu->sccl_id, l3c_pmu->index_id);
l3c_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.event_init = hisi_uncore_pmu_event_init,
.pmu_enable = hisi_uncore_pmu_enable,
@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.driver = {
.name = "hisi_l3c_pmu",
.acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = hisi_l3c_pmu_probe,
.remove = hisi_l3c_pmu_remove,


@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = {
.driver = {
.name = "qcom-l2cache-pmu",
.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = l2_cache_pmu_probe,
.remove = l2_cache_pmu_remove,


@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = {
.driver = {
.name = "qcom-l3cache-pmu",
.acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = qcom_l3_cache_pmu_probe,
};


@ -816,6 +816,7 @@ static struct platform_driver tx2_uncore_driver = {
.driver = {
.name = "tx2-uncore-pmu",
.acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
.suppress_bind_attrs = true,
},
.probe = tx2_uncore_probe,
.remove = tx2_uncore_remove,

Some files were not shown because too many files have changed in this diff.