This is the 5.4.231 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmPgo/YACgkQONu9yGCS
 aT4o0RAAlt2uWRXaiDW2cYi1dKAuqk8Iyf0tlonzDkSESy6Qy28rw62BIbBRHFNv
 ObPjlz4FgI9ZfSVBsolFjBACTXvzS/fPvvqBEVmWqLA0+cN0/RRsJ8AJYV+wxV4U
 j0h+asxtkaWxhPmsnr0FtVG6KnqMCZkYCJYzkEwMmGZqmhkvqZVtGO5Hxwa+pTuD
 A+EpvsRCeqK42GqM1nn14er7Cej2bX6eM+MX1vhA/rNGgf4OrHSs5CQaLWFioFUO
 VN1I2/aiC+iqpF8poPC4evDgko291s+QYvtIRqcfCGjJqpfwGDWA8xReZPXKD4+4
 JeY0WXHxtbjg1B+FQKZR4ESYlZfBLejI94CN32VJ3uI6CV+VgIyJMBXQ1Vs09OeN
 IEighGiXTHezS5NvHQTL/Y3CSooWuCxIQMmJelSW6Kr7tLpZ4/GMr4V2RU0XO9tF
 l3SRR/Q+w8IRtPsNNbmTB9wWJxcuyTHavrl6mG2DUy86UbJhoxjyYj7XUpiVyzbc
 /UmbHLXdeg9QCayhiHtCvPfcJF8EWoqoYfKSTJrj3B2ysQo7aPVK3D2/cYGRQ80A
 EssOD3IzC+QiBb30TzGJzJ5xaIMcaDZb61Hs7afYkhYUjQyqoQEh6ZxS8x0SCHFE
 8YsVkwNm47Iw9ySPhfIIZiTfxMcK8n2zN85rAlfonlWasblr9Ok=
 =uM6z
 -----END PGP SIGNATURE-----

Merge 5.4.231 into android11-5.4-lts

Changes in 5.4.231
	clk: generalize devm_clk_get() a bit
	clk: Provide new devm_clk helpers for prepared and enabled clocks
	memory: atmel-sdramc: Fix missing clk_disable_unprepare in atmel_ramc_probe()
	memory: mvebu-devbus: Fix missing clk_disable_unprepare in mvebu_devbus_probe()
	ARM: dts: imx6qdl-gw560x: Remove incorrect 'uart-has-rtscts'
	ARM: imx27: Retrieve the SYSCTRL base address from devicetree
	ARM: imx31: Retrieve the IIM base address from devicetree
	ARM: imx35: Retrieve the IIM base address from devicetree
	ARM: imx: add missing of_node_put()
	HID: intel_ish-hid: Add check for ishtp_dma_tx_map
	EDAC/highbank: Fix memory leak in highbank_mc_probe()
	tomoyo: fix broken dependency on *.conf.default
	RDMA/core: Fix ib block iterator counter overflow
	IB/hfi1: Reject a zero-length user expected buffer
	IB/hfi1: Reserve user expected TIDs
	IB/hfi1: Fix expected receive setup error exit issues
	affs: initialize fsdata in affs_truncate()
	amd-xgbe: TX Flow Ctrl Registers are h/w ver dependent
	amd-xgbe: Delay AN timeout during KR training
	bpf: Fix pointer-leak due to insufficient speculative store bypass mitigation
	phy: rockchip-inno-usb2: Fix missing clk_disable_unprepare() in rockchip_usb2phy_power_on()
	net: nfc: Fix use-after-free in local_cleanup()
	net: wan: Add checks for NULL for utdm in undo_uhdlc_init and unmap_si_regs
	gpio: mxc: Always set GPIOs used as interrupt source to INPUT mode
	wifi: rndis_wlan: Prevent buffer overflow in rndis_query_oid
	net/sched: sch_taprio: fix possible use-after-free
	net: fix a concurrency bug in l2tp_tunnel_register()
	l2tp: Serialize access to sk_user_data with sk_callback_lock
	l2tp: Don't sleep and disable BH under writer-side sk_callback_lock
	net: usb: sr9700: Handle negative len
	net: mdio: validate parameter addr in mdiobus_get_phy()
	HID: check empty report_list in hid_validate_values()
	HID: check empty report_list in bigben_probe()
	net: stmmac: fix invalid call to mdiobus_get_phy()
	HID: revert CHERRY_MOUSE_000C quirk
	usb: gadget: f_fs: Prevent race during ffs_ep0_queue_wait
	usb: gadget: f_fs: Ensure ep0req is dequeued before free_request
	net: mlx5: eliminate anonymous module_init & module_exit
	drm/panfrost: fix GENERIC_ATOMIC64 dependency
	dmaengine: Fix double increment of client_count in dma_chan_get()
	net: macb: fix PTP TX timestamp failure due to packet padding
	HID: betop: check shape of output reports
	dmaengine: xilinx_dma: use devm_platform_ioremap_resource()
	dmaengine: xilinx_dma: Fix devm_platform_ioremap_resource error handling
	dmaengine: xilinx_dma: call of_node_put() when breaking out of for_each_child_of_node()
	tcp: avoid the lookup process failing to get sk in ehash table
	w1: fix deadloop in __w1_remove_master_device()
	w1: fix WARNING after calling w1_process()
	driver core: Fix test_async_probe_init saves device in wrong array
	net: dsa: microchip: ksz9477: port map correction in ALU table entry register
	tcp: fix rate_app_limited to default to 1
	cpufreq: Add Tegra234 to cpufreq-dt-platdev blocklist
	ASoC: fsl_micfil: Correct the number of steps on SX controls
	drm: Add orientation quirk for Lenovo ideapad D330-10IGL
	s390/debug: add _ASM_S390_ prefix to header guard
	cpufreq: armada-37xx: stop using 0 as NULL pointer
	ASoC: fsl_ssi: Rename AC'97 streams to avoid collisions with AC'97 CODEC
	ASoC: fsl-asoc-card: Fix naming of AC'97 CODEC widgets
	spi: spidev: remove debug messages that access spidev->spi without locking
	KVM: s390: interrupt: use READ_ONCE() before cmpxchg()
	scsi: hisi_sas: Set a port invalid only if there are no devices attached when refreshing port id
	platform/x86: touchscreen_dmi: Add info for the CSL Panther Tab HD
	platform/x86: asus-nb-wmi: Add alternate mapping for KEY_SCREENLOCK
	lockref: stop doing cpu_relax in the cmpxchg loop
	mmc: sdhci-esdhc-imx: clear pending interrupt and halt cqhci
	mmc: sdhci-esdhc-imx: disable the CMD CRC check for standard tuning
	mmc: sdhci-esdhc-imx: correct the tuning start tap and step setting
	Revert "selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID"
	netfilter: conntrack: do not renew entry stuck in tcp SYN_SENT state
	fs: reiserfs: remove useless new_opts in reiserfs_remount
	Revert "Revert "xhci: Set HCD flag to defer primary roothub registration""
	Bluetooth: hci_sync: cancel cmd_timer if hci_open failed
	scsi: hpsa: Fix allocation size for scsi_host_alloc()
	module: Don't wait for GOING modules
	tracing: Make sure trace_printk() can output as soon as it can be used
	trace_events_hist: add check for return value of 'create_hist_field'
	ftrace/scripts: Update the instructions for ftrace-bisect.sh
	cifs: Fix oops due to uncleared server->smbd_conn in reconnect
	KVM: x86/vmx: Do not skip segment attributes if unusable bit is set
	thermal: intel: int340x: Protect trip temperature from concurrent updates
	ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment
	EDAC/device: Respect any driver-supplied workqueue polling value
	EDAC/qcom: Do not pass llcc_driv_data as edac_device_ctl_info's pvt_info
	netlink: prevent potential spectre v1 gadgets
	net: fix UaF in netns ops registration error path
	netfilter: nft_set_rbtree: skip elements in transaction from garbage collection
	netlink: annotate data races around nlk->portid
	netlink: annotate data races around dst_portid and dst_group
	netlink: annotate data races around sk_state
	ipv4: prevent potential spectre v1 gadget in ip_metrics_convert()
	ipv4: prevent potential spectre v1 gadget in fib_metrics_match()
	netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE
	netrom: Fix use-after-free of a listening socket.
	net/sched: sch_taprio: do not schedule in taprio_reset()
	sctp: fail if no bound addresses can be used for a given scope
	net: ravb: Fix possible hang if RIS2_QFF1 happen
	thermal: intel: int340x: Add locking to int340x_thermal_get_trip_type()
	net/tg3: resolve deadlock in tg3_reset_task() during EEH
	net/phy/mdio-i2c: Move header file to include/linux/mdio
	net: xgene: Move shared header file into include/linux
	net: mdio-mux-meson-g12a: force internal PHY off on mux switch
	Revert "Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode"
	nfsd: Ensure knfsd shuts down when the "nfsd" pseudofs is unmounted
	block: fix and cleanup bio_check_ro
	x86/i8259: Mark legacy PIC interrupts with IRQ_LEVEL
	netfilter: conntrack: unify established states for SCTP paths
	perf/x86/amd: fix potential integer overflow on shift of a int
	clk: Fix pointer casting to prevent oops in devm_clk_release()
	x86/asm: Fix an assembler warning with current binutils
	ARM: dts: imx: Fix pca9547 i2c-mux node name
	bpf: Skip task with pid=1 in send_signal_common()
	blk-cgroup: fix missing pd_online_fn() while activating policy
	dmaengine: imx-sdma: Fix a possible memory leak in sdma_transfer_init
	sysctl: add a new register_sysctl_init() interface
	panic: unset panic_on_warn inside panic()
	mm: kasan: do not panic if both panic_on_warn and kasan_multishot set
	exit: Add and use make_task_dead.
	objtool: Add a missing comma to avoid string concatenation
	hexagon: Fix function name in die()
	h8300: Fix build errors from do_exit() to make_task_dead() transition
	csky: Fix function name in csky_alignment() and die()
	ia64: make IA64_MCA_RECOVERY bool instead of tristate
	exit: Put an upper limit on how often we can oops
	exit: Expose "oops_count" to sysfs
	exit: Allow oops_limit to be disabled
	panic: Consolidate open-coded panic_on_warn checks
	panic: Introduce warn_limit
	panic: Expose "warn_count" to sysfs
	docs: Fix path paste-o for /sys/kernel/warn_count
	exit: Use READ_ONCE() for all oops/warn limit reads
	ipv6: ensure sane device mtu in tunnels
	Bluetooth: fix null ptr deref on hci_sync_conn_complete_evt
	usb: host: xhci-plat: add wakeup entry at sysfs
	Revert "xprtrdma: Fix regbuf data not freed in rpcrdma_req_create()"
	Linux 5.4.231

Change-Id: I0f670158dd88a589d5f56246d094d3392a1784f9
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit e367c7b762 by Greg Kroah-Hartman, 2023-02-06 10:47:39 +00:00
176 changed files with 1187 additions and 536 deletions


@ -0,0 +1,6 @@
What: /sys/kernel/oops_count
Date: November 2022
KernelVersion: 6.2.0
Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
Description:
Shows how many times the system has Oopsed since last boot.


@ -0,0 +1,6 @@
What: /sys/kernel/warn_count
Date: November 2022
KernelVersion: 6.2.0
Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
Description:
Shows how many times the system has Warned since last boot.
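
The two ABI entries above expose plain integer counters, and the oops_limit/warn_limit sysctls documented in the sysctl hunk that follows are equally simple integer files. A minimal userspace sketch, not part of this series (the read_counter() helper and the idea of a polling monitor are assumptions; the /proc/sys/kernel paths are the standard procfs view of the documented sysctls):

#include <stdio.h>

/* Hypothetical helper: read one integer from a kernel-provided file. */
static long read_counter(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Counters documented in the sysfs ABI entries above. */
	printf("oops_count: %ld\n", read_counter("/sys/kernel/oops_count"));
	printf("warn_count: %ld\n", read_counter("/sys/kernel/warn_count"));
	/* Limits documented in the sysctl hunk below (procfs view). */
	printf("oops_limit: %ld\n", read_counter("/proc/sys/kernel/oops_limit"));
	printf("warn_limit: %ld\n", read_counter("/proc/sys/kernel/warn_limit"));
	return 0;
}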


@ -557,6 +557,15 @@ numa_balancing_scan_size_mb is how many megabytes worth of pages are
scanned for a given scan.
oops_limit
==========
Number of kernel oopses after which the kernel should panic when
``panic_on_oops`` is not set. Setting this to 0 disables checking
the count. Setting this to 1 has the same effect as setting
``panic_on_oops=1``. The default value is 10000.
osrelease, ostype & version:
============================
@ -1177,6 +1186,16 @@ entry will default to 2 instead of 0.
2 Unprivileged calls to ``bpf()`` are disabled
= =============================================================
warn_limit
==========
Number of kernel warnings after which the kernel should panic when
``panic_on_warn`` is not set. Setting this to 0 disables checking
the warning count. Setting this to 1 has the same effect as setting
``panic_on_warn=1``. The default value is 0.
watchdog:
=========


@ -14673,6 +14673,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/phylink.c
F: drivers/net/phy/sfp*
F: include/linux/mdio/mdio-i2c.h
F: include/linux/phylink.h
F: include/linux/sfp.h
K: phylink


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 230
SUBLEVEL = 231
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
local_irq_enable();
while (1);
}
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
#ifndef CONFIG_MATHEMU
@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
pc, va, opcode, reg);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
got_exception:
/* Ok, we caught the exception, but we don't want it. Is there
@ -632,7 +632,7 @@ got_exception:
local_irq_enable();
while (1);
}
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
/*


@ -206,7 +206,7 @@ retry:
printk(KERN_ALERT "Unable to handle kernel paging request at "
"virtual address %016lx\n", address);
die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */


@ -461,7 +461,7 @@
scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
status = "okay";
i2c-switch@70 {
i2c-mux@70 {
compatible = "nxp,pca9547";
#address-cells = <1>;
#size-cells = <0>;


@ -464,7 +464,6 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
uart-has-rtscts;
rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};


@ -341,7 +341,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
if (panic_on_oops)
panic("Fatal exception");
if (signr)
do_exit(signr);
make_task_dead(signr);
}
/*


@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void)
np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
iim_base = of_iomap(np, 0);
of_node_put(np);
BUG_ON(!iim_base);
rev = readl(iim_base + MXC_IIMSREV);
iounmap(iim_base);


@ -9,6 +9,7 @@
*/
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include "hardware.h"
@ -17,16 +18,24 @@ static int mx27_cpu_rev = -1;
static int mx27_cpu_partnumber;
#define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
#define SYSCTRL_OFFSET 0x800 /* Offset from CCM base address */
static int mx27_read_cpu_rev(void)
{
void __iomem *ccm_base;
struct device_node *np;
u32 val;
np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
ccm_base = of_iomap(np, 0);
of_node_put(np);
BUG_ON(!ccm_base);
/*
* now we have access to the IO registers. As we need
* the silicon revision very early we read it here to
* avoid any further hooks
*/
val = imx_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR + SYS_CHIP_ID));
val = imx_readl(ccm_base + SYSCTRL_OFFSET + SYS_CHIP_ID);
mx27_cpu_partnumber = (int)((val >> 12) & 0xFFFF);


@ -6,6 +6,7 @@
*/
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include "common.h"
@ -32,10 +33,17 @@ static struct {
static int mx31_read_cpu_rev(void)
{
void __iomem *iim_base;
struct device_node *np;
u32 i, srev;
np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
iim_base = of_iomap(np, 0);
of_node_put(np);
BUG_ON(!iim_base);
/* read SREV register from IIM module */
srev = imx_readl(MX31_IO_ADDRESS(MX31_IIM_BASE_ADDR + MXC_IIMSREV));
srev = imx_readl(iim_base + MXC_IIMSREV);
srev &= 0xff;
for (i = 0; i < ARRAY_SIZE(mx31_cpu_type); i++)


@ -5,6 +5,7 @@
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
*/
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include "hardware.h"
@ -14,9 +15,16 @@ static int mx35_cpu_rev = -1;
static int mx35_read_cpu_rev(void)
{
void __iomem *iim_base;
struct device_node *np;
u32 rev;
rev = imx_readl(MX35_IO_ADDRESS(MX35_IIM_BASE_ADDR + MXC_IIMSREV));
np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
iim_base = of_iomap(np, 0);
of_node_put(np);
BUG_ON(!iim_base);
rev = imx_readl(iim_base + MXC_IIMSREV);
switch (rev) {
case 0x00:
return IMX_CHIP_REVISION_1_0;


@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat)
np = of_find_compatible_node(NULL, NULL, compat);
iim_base = of_iomap(np, 0);
of_node_put(np);
WARN_ON(!iim_base);
srev = readl(iim_base + IIM_SREV) & 0xff;


@ -124,7 +124,7 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
/*


@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc)
mpu_setup();
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);


@ -202,7 +202,7 @@ void die(const char *str, struct pt_regs *regs, int err)
raw_spin_unlock_irqrestore(&die_lock, flags);
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
static void arm64_show_signal(int signo, const char *str)


@ -296,7 +296,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
show_pte(addr);
die("Oops", regs, esr);
bust_spinlocks(0);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
static void __do_kernel_fault(unsigned long addr, unsigned int esr,


@ -294,7 +294,7 @@ bad_area:
__func__, opcode, rz, rx, imm, addr);
show_regs(regs);
bust_spinlocks(0);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);


@ -85,7 +85,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, int nr)
pr_err("%s: %08x\n", str, nr);
show_regs(regs);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
void buserr(struct pt_regs *regs)


@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/mm_types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@ -110,7 +111,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
dump(fp);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
static int kstack_depth_to_print = 24;


@ -52,7 +52,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
printk(" at virtual address %08lx\n", address);
if (!user_mode(regs))
die("Oops", regs, error_code);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
return 1;
}


@ -221,7 +221,7 @@ int die(const char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
do_exit(err);
make_task_dead(err);
return 0;
}


@ -360,7 +360,7 @@ config ARCH_PROC_KCORE_TEXT
depends on PROC_KCORE
config IA64_MCA_RECOVERY
tristate "MCA recovery from errors other than TLB."
bool "MCA recovery from errors other than TLB."
config PERFMON
bool "Performance monitor support"


@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
@ -176,7 +177,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
spin_unlock(&mca_bh_lock);
/* This process is about to be killed itself */
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
/**


@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
return 0;
}


@ -272,7 +272,7 @@ retry:
regs = NULL;
bust_spinlocks(0);
if (regs)
do_exit(SIGKILL);
make_task_dead(SIGKILL);
return;
out_of_memory:


@ -1139,7 +1139,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
pr_crit("%s: %08x\n", str, nr);
show_registers(fp);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
asmlinkage void set_esp0(unsigned long ssp)


@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs)
pr_alert("Unable to handle kernel access");
pr_cont(" at virtual address %p\n", addr);
die_if_kernel("Oops", regs, 0 /*error_code*/);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
return 1;


@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err)
pr_warn("Oops: %s, sig: %ld\n", str, err);
show_regs(fp);
spin_unlock_irq(&die_lock);
/* do_exit() should take care of panic'ing from an interrupt
/* make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
do_exit(err);
make_task_dead(err);
}
/* for user application debugging */


@ -415,7 +415,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
if (regs && kexec_should_crash(current))
crash_kexec(regs);
do_exit(sig);
make_task_dead(sig);
}
extern struct exception_table_entry __start___dbe_table[];


@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
}
} else if (fpcsr & FPCSR_mskRIT) {
if (!user_mode(regs))
do_exit(SIGILL);
make_task_dead(SIGILL);
si_signo = SIGILL;
}


@ -184,7 +184,7 @@ void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
EXPORT_SYMBOL(die);
@ -288,7 +288,7 @@ void unhandled_interruption(struct pt_regs *regs)
pr_emerg("unhandled_interruption\n");
show_regs(regs);
if (!user_mode(regs))
do_exit(SIGKILL);
make_task_dead(SIGKILL);
force_sig(SIGKILL);
}
@ -299,7 +299,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
addr, type);
show_regs(regs);
if (!user_mode(regs))
do_exit(SIGKILL);
make_task_dead(SIGKILL);
force_sig(SIGKILL);
}
@ -326,7 +326,7 @@ void do_revinsn(struct pt_regs *regs)
pr_emerg("Reserved Instruction\n");
show_regs(regs);
if (!user_mode(regs))
do_exit(SIGILL);
make_task_dead(SIGILL);
force_sig(SIGILL);
}


@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err)
show_regs(regs);
spin_unlock_irq(&die_lock);
/*
* do_exit() should take care of panic'ing from an interrupt
* make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
do_exit(err);
make_task_dead(err);
}
void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)


@ -218,7 +218,7 @@ void die(const char *str, struct pt_regs *regs, long err)
__asm__ __volatile__("l.nop 1");
do {} while (1);
#endif
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
/* This is normally the 'Oops' routine */


@ -268,7 +268,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
/* gdb uses break 4,8 */


@ -246,7 +246,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
if (panic_on_oops)
panic("Fatal exception");
do_exit(signr);
make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);


@ -57,7 +57,7 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception");
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)


@ -189,7 +189,7 @@ no_context:
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
die(regs, "Oops");
do_exit(SIGKILL);
make_task_dead(SIGKILL);
/*
* We ran out of memory, call the OOM killer, and return the userspace


@ -4,8 +4,8 @@
*
* Copyright IBM Corp. 1999, 2000
*/
#ifndef DEBUG_H
#define DEBUG_H
#ifndef _ASM_S390_DEBUG_H
#define _ASM_S390_DEBUG_H
#include <linux/string.h>
#include <linux/spinlock.h>
@ -416,4 +416,4 @@ int debug_unregister_view(debug_info_t *id, struct debug_view *view);
#define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x)
#endif /* DASD_DEBUG */
#endif /* DEBUG_H */
#endif /* _ASM_S390_DEBUG_H */


@ -210,5 +210,5 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}


@ -179,7 +179,7 @@ void s390_handle_mcck(void)
"malfunction (code 0x%016lx).\n", mcck.mcck_code);
printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
current->comm, current->pid);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
}
EXPORT_SYMBOL_GPL(s390_handle_mcck);


@ -81,8 +81,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
union esca_sigp_ctrl new_val = {0}, old_val;
old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
@ -93,8 +94,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
union bsca_sigp_ctrl new_val = {0}, old_val;
old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
@ -124,16 +126,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl old = *sigp_ctrl;
union esca_sigp_ctrl old;
old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl old = *sigp_ctrl;
union bsca_sigp_ctrl old;
old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
}


@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
void die_if_kernel(const char *str, struct pt_regs *regs, long err)


@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
printk("Instruction DUMP:");
instruction_dump ((unsigned long *) regs->pc);
if(regs->psr & PSR_PS)
do_exit(SIGKILL);
do_exit(SIGSEGV);
make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
}
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)


@ -2564,9 +2564,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
if (panic_on_oops)
panic("Fatal exception");
if (regs->tstate & TSTATE_PRIV)
do_exit(SIGKILL);
do_exit(SIGSEGV);
make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);


@ -1659,13 +1659,13 @@ ENTRY(async_page_fault)
END(async_page_fault)
#endif
ENTRY(rewind_stack_do_exit)
ENTRY(rewind_stack_and_make_dead)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
call do_exit
call make_task_dead
1: jmp 1b
END(rewind_stack_do_exit)
END(rewind_stack_and_make_dead)


@ -1757,7 +1757,7 @@ ENTRY(ignore_sysret)
END(ignore_sysret)
#endif
ENTRY(rewind_stack_do_exit)
ENTRY(rewind_stack_and_make_dead)
UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@ -1766,5 +1766,5 @@ ENTRY(rewind_stack_do_exit)
leaq -PTREGS_SIZE(%rax), %rsp
UNWIND_HINT_REGS
call do_exit
END(rewind_stack_do_exit)
call make_task_dead
END(rewind_stack_and_make_dead)


@ -969,7 +969,7 @@ static int __init amd_core_pmu_init(void)
* numbered counter following it.
*/
for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
even_ctr_mask |= 1 << i;
even_ctr_mask |= BIT_ULL(i);
pair_constraint = (struct event_constraint)
__EVENT_CONSTRAINT(0, even_ctr_mask, 0,


@ -326,7 +326,7 @@ unsigned long oops_begin(void)
}
NOKPROBE_SYMBOL(oops_begin);
void __noreturn rewind_stack_do_exit(int signr);
void __noreturn rewind_stack_and_make_dead(int signr);
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
@ -361,7 +361,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
* reuse the task stack and that existing poisons are invalid.
*/
kasan_unpoison_task_stack(current);
rewind_stack_do_exit(signr);
rewind_stack_and_make_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);


@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
irq_set_status_flags(irq, IRQ_LEVEL);
enable_irq(irq);
lapic_assign_legacy_vector(irq, true);
}


@ -72,8 +72,10 @@ void __init init_ISA_irqs(void)
legacy_pic->init(0);
for (i = 0; i < nr_legacy_irqs(); i++)
for (i = 0; i < nr_legacy_irqs(); i++) {
irq_set_chip_and_handler(i, chip, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
void __init init_IRQ(void)


@ -3241,18 +3241,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
if (var->unusable || !var->present)
ar = 1 << 16;
else {
ar = var->type & 15;
ar |= (var->s & 1) << 4;
ar |= (var->dpl & 3) << 5;
ar |= (var->present & 1) << 7;
ar |= (var->avl & 1) << 12;
ar |= (var->l & 1) << 13;
ar |= (var->db & 1) << 14;
ar |= (var->g & 1) << 15;
}
ar = var->type & 15;
ar |= (var->s & 1) << 4;
ar |= (var->dpl & 3) << 5;
ar |= (var->present & 1) << 7;
ar |= (var->avl & 1) << 12;
ar |= (var->l & 1) << 13;
ar |= (var->db & 1) << 14;
ar |= (var->g & 1) << 15;
ar |= (var->unusable || !var->present) << 16;
return ar;
}


@ -10,6 +10,6 @@
*/
ENTRY(__iowrite32_copy)
movl %edx,%ecx
rep movsd
rep movsl
ret
ENDPROC(__iowrite32_copy)


@ -543,5 +543,5 @@ void die(const char * str, struct pt_regs * regs, long err)
if (panic_on_oops)
panic("Fatal exception");
do_exit(err);
make_task_dead(err);
}


@ -1445,6 +1445,10 @@ retry:
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
pol->pd_init_fn(blkg->pd[pol->plid]);
if (pol->pd_online_fn)
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
pol->pd_online_fn(blkg->pd[pol->plid]);
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;


@ -796,10 +796,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
WARN_ONCE(1,
"generic_make_request: Trying to write "
"to read-only block-device %s (partno %d)\n",
pr_warn("Trying to write to read-only block-device %s (partno %d)\n",
bio_devname(bio, b), part->partno);
/* Older lvm-tools actually trigger this */
return false;


@ -146,7 +146,7 @@ static int __init test_async_probe_init(void)
calltime = ktime_get();
for_each_online_cpu(cpu) {
nid = cpu_to_node(cpu);
pdev = &sync_dev[sync_id];
pdev = &async_dev[async_id];
*pdev = test_platform_device_register_node("test_async_driver",
async_id,


@ -4,42 +4,101 @@
#include <linux/export.h>
#include <linux/gfp.h>
struct devm_clk_state {
struct clk *clk;
void (*exit)(struct clk *clk);
};
static void devm_clk_release(struct device *dev, void *res)
{
clk_put(*(struct clk **)res);
struct devm_clk_state *state = res;
if (state->exit)
state->exit(state->clk);
clk_put(state->clk);
}
static struct clk *__devm_clk_get(struct device *dev, const char *id,
struct clk *(*get)(struct device *dev, const char *id),
int (*init)(struct clk *clk),
void (*exit)(struct clk *clk))
{
struct devm_clk_state *state;
struct clk *clk;
int ret;
state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
clk = get(dev, id);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto err_clk_get;
}
if (init) {
ret = init(clk);
if (ret)
goto err_clk_init;
}
state->clk = clk;
state->exit = exit;
devres_add(dev, state);
return clk;
err_clk_init:
clk_put(clk);
err_clk_get:
devres_free(state);
return ERR_PTR(ret);
}
struct clk *devm_clk_get(struct device *dev, const char *id)
{
struct clk **ptr, *clk;
ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
clk = clk_get(dev, id);
if (!IS_ERR(clk)) {
*ptr = clk;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return clk;
return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get,
clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
struct clk *devm_clk_get_optional(struct device *dev, const char *id)
{
struct clk *clk = devm_clk_get(dev, id);
if (clk == ERR_PTR(-ENOENT))
return NULL;
return clk;
return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get_optional,
clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get_optional,
clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
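
The clk-devres hunk above introduces devm_clk_get_prepared()/devm_clk_get_enabled() (plus their _optional variants), which hook clk_unprepare()/clk_disable_unprepare() into the device's devres teardown; this is what lets the atmel-sdramc and mvebu-devbus fixes later in this diff drop their manual clk_prepare_enable() calls. A minimal consumer sketch, assuming a hypothetical platform driver and a clock named "bus" (neither is from this series):

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * Get, prepare and enable the clock in one call; it is disabled,
	 * unprepared and put automatically when the device is unbound,
	 * so no error-path or remove() unwinding is needed for it.
	 */
	clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = { .name = "devm-clk-demo" },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");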


@ -443,7 +443,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
return -ENODEV;
}
clk = clk_get(cpu_dev, 0);
clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
dev_err(cpu_dev, "Cannot get clock for CPU0\n");
return PTR_ERR(clk);


@ -126,6 +126,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
{ .compatible = "nvidia,tegra234", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },


@ -212,7 +212,8 @@ static int dma_chan_get(struct dma_chan *chan)
/* The channel is already in use, update client count */
if (chan->client_count) {
__module_get(owner);
goto out;
chan->client_count++;
return 0;
}
if (!try_module_get(owner))
@ -225,11 +226,11 @@ static int dma_chan_get(struct dma_chan *chan)
goto err_out;
}
chan->client_count++;
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
out:
chan->client_count++;
return 0;
err_out:


@ -1360,10 +1360,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
sdma_config_ownership(sdmac, false, true, false);
if (sdma_load_context(sdmac))
goto err_desc_out;
goto err_bd_out;
return desc;
err_bd_out:
sdma_free_bd(desc);
err_desc_out:
kfree(desc);
err_out:


@ -2626,7 +2626,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
struct resource *io;
u32 num_frames, addr_width, len_width;
int i, err;
@ -2652,11 +2651,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return err;
/* Request and map I/O memory */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xdev->regs = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(xdev->regs))
return PTR_ERR(xdev->regs);
xdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->regs)) {
err = PTR_ERR(xdev->regs);
goto disable_clks;
}
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
@ -2748,8 +2747,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Initialize the channels */
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
goto disable_clks;
if (err < 0) {
of_node_put(child);
goto error;
}
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@ -2782,12 +2783,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
disable_clks:
xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->nr_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
disable_clks:
xdma_disable_allclks(xdev);
return err;
}


@ -34,6 +34,9 @@
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
/* Default workqueue processing interval on this instance, in msecs */
#define DEFAULT_POLL_INTERVAL 1000
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
* whole one second to save timers firing all over the period
* between integral seconds
*/
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@ -430,7 +433,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
edac_dev->delay = msecs_to_jiffies(msec);
/* See comment in edac_device_workq_setup() above */
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_mod_work(&edac_dev->work, edac_dev->delay);
@ -472,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
/*
* enable workq processing on this instance,
* default = 1000 msec
*/
edac_device_workq_setup(edac_dev, 1000);
edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}


@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
drvdata = mci->pvt_info;
platform_set_drvdata(pdev, mci);
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
return -ENOMEM;
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
res = -ENOMEM;
goto free;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@ -243,6 +245,7 @@ err2:
edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
free:
edac_mc_free(mci);
return res;
}


@ -252,7 +252,7 @@ clear:
static int
dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
{
struct llcc_drv_data *drv = edev_ctl->pvt_info;
struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
int ret;
ret = dump_syn_reg_values(drv, bank, err_type);
@ -289,7 +289,7 @@ static irqreturn_t
llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
int ret;
@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
edev_ctl->pvt_info = llcc_driv_data;
rc = edac_device_add_device(edev_ctl);
if (rc)


@ -229,7 +229,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
writel(1 << gpio_idx, port->base + GPIO_ISR);
return 0;
return port->gc.direction_input(&port->gc, gpio_idx);
}
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)

View File

@ -272,6 +272,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Ideapad D330-10IGL (HD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Yoga Book X90F / X91F / X91L */
.matches = {
/* Non exact match to match all versions */


@ -3,7 +3,8 @@
config DRM_PANFROST
tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
depends on DRM
depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
depends on ARM || ARM64 || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
depends on MMU
select DRM_SCHED
select IOMMU_SUPPORT


@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev;
int field_count = 0;
int error;
int i, j;
@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
* -----------------------------------------
* Do init them with default value.
*/
if (report->maxfield < 4) {
hid_err(hid, "not enough fields in the report: %d\n",
report->maxfield);
return -ENODEV;
}
for (i = 0; i < report->maxfield; i++) {
if (report->field[i]->report_count < 1) {
hid_err(hid, "no values in the field\n");
return -ENODEV;
}
for (j = 0; j < report->field[i]->report_count; j++) {
report->field[i]->value[j] = 0x00;
field_count++;
}
}
if (field_count < 4) {
hid_err(hid, "not enough fields in the report: %d\n",
field_count);
return -ENODEV;
}
betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
if (!betopff)
return -ENOMEM;


@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid,
}
report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
if (list_empty(report_list)) {
hid_err(hid, "no output report found\n");
error = -ENODEV;
goto error_hw_stop;
}
bigben->report = list_entry(report_list->next,
struct hid_report, list);


@ -981,8 +981,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
* Validating on id 0 means we should examine the first
* report in the list.
*/
report = list_entry(
hid->report_enum[type].report_list.next,
report = list_first_entry_or_null(
&hid->report_enum[type].report_list,
struct hid_report, list);
} else {
report = hid->report_enum[type].report_id_hash[id];


@ -259,7 +259,6 @@
#define USB_DEVICE_ID_CH_AXIS_295 0x001c
#define USB_VENDOR_ID_CHERRY 0x046a
#define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c
#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027


@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },


@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
int required_slots = (size / DMA_SLOT_SIZE)
+ 1 * (size % DMA_SLOT_SIZE != 0);
if (!dev->ishtp_dma_tx_map) {
dev_err(dev->devc, "Fail to allocate Tx map\n");
return NULL;
}
spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
free = 1;
@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
return;
}
if (!dev->ishtp_dma_tx_map) {
dev_err(dev->devc, "Fail to allocate Tx map\n");
return;
}
i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
for (j = 0; j < acked_slots; j++) {


@ -2840,15 +2840,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
unsigned int block_offset;
unsigned int sg_delta;
if (!biter->__sg_nents || !biter->__sg)
return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
biter->__sg_advance += sg_delta;
} else {
biter->__sg_advance = 0;
biter->__sg = sg_next(biter->__sg);
biter->__sg_nents--;


@ -325,6 +325,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
if (!PAGE_ALIGNED(tinfo->vaddr))
return -EINVAL;
if (tinfo->length == 0)
return -EINVAL;
tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
if (!tidbuf)
@ -335,40 +337,38 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
kfree(tidbuf);
return -ENOMEM;
ret = -ENOMEM;
goto fail_release_mem;
}
pinned = pin_rcv_pages(fd, tidbuf);
if (pinned <= 0) {
kfree(tidbuf->psets);
kfree(tidbuf);
return pinned;
ret = (pinned < 0) ? pinned : -ENOSPC;
goto fail_unpin;
}
/* Find sets of physically contiguous pages */
tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
/*
* We don't need to access this under a lock since tid_used is per
* process and the same process cannot be in hfi1_user_exp_rcv_clear()
* and hfi1_user_exp_rcv_setup() at the same time.
*/
/* Reserve the number of expected tids to be used. */
spin_lock(&fd->tid_lock);
if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
pageset_count = fd->tid_limit - fd->tid_used;
else
pageset_count = tidbuf->n_psets;
fd->tid_used += pageset_count;
spin_unlock(&fd->tid_lock);
if (!pageset_count)
goto bail;
if (!pageset_count) {
ret = -ENOSPC;
goto fail_unreserve;
}
ngroups = pageset_count / dd->rcv_entries.group_size;
tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
if (!tidlist) {
ret = -ENOMEM;
goto nomem;
goto fail_unreserve;
}
tididx = 0;
@ -464,43 +464,60 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
}
unlock:
mutex_unlock(&uctxt->exp_mutex);
nomem:
hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
mapped_pages, ret);
if (tididx) {
spin_lock(&fd->tid_lock);
fd->tid_used += tididx;
spin_unlock(&fd->tid_lock);
tinfo->tidcnt = tididx;
tinfo->length = mapped_pages * PAGE_SIZE;
if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
tidlist, sizeof(tidlist[0]) * tididx)) {
/*
* On failure to copy to the user level, we need to undo
* everything done so far so we don't leak resources.
*/
tinfo->tidlist = (unsigned long)&tidlist;
hfi1_user_exp_rcv_clear(fd, tinfo);
tinfo->tidlist = 0;
ret = -EFAULT;
goto bail;
}
/* fail if nothing was programmed, set error if none provided */
if (tididx == 0) {
if (ret >= 0)
ret = -ENOSPC;
goto fail_unreserve;
}
/* adjust reserved tid_used to actual count */
spin_lock(&fd->tid_lock);
fd->tid_used -= pageset_count - tididx;
spin_unlock(&fd->tid_lock);
/* unpin all pages not covered by a TID */
unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
false);
tinfo->tidcnt = tididx;
tinfo->length = mapped_pages * PAGE_SIZE;
if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
tidlist, sizeof(tidlist[0]) * tididx)) {
ret = -EFAULT;
goto fail_unprogram;
}
/*
* If not everything was mapped (due to insufficient RcvArray entries,
* for example), unpin all unmapped pages so we can pin them nex time.
*/
if (mapped_pages != pinned)
unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
(pinned - mapped_pages), false);
bail:
kfree(tidbuf->psets);
kfree(tidlist);
kfree(tidbuf->pages);
kfree(tidbuf->psets);
kfree(tidbuf);
return ret > 0 ? 0 : ret;
kfree(tidlist);
return 0;
fail_unprogram:
/* unprogram, unmap, and unpin all allocated TIDs */
tinfo->tidlist = (unsigned long)tidlist;
hfi1_user_exp_rcv_clear(fd, tinfo);
tinfo->tidlist = 0;
pinned = 0; /* nothing left to unpin */
pageset_count = 0; /* nothing left reserved */
fail_unreserve:
spin_lock(&fd->tid_lock);
fd->tid_used -= pageset_count;
spin_unlock(&fd->tid_lock);
fail_unpin:
if (pinned > 0)
unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
fail_release_mem:
kfree(tidbuf->pages);
kfree(tidbuf->psets);
kfree(tidbuf);
kfree(tidlist);
return ret;
}
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,


@ -189,7 +189,6 @@ static const char * const smbus_pnp_ids[] = {
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
"SYN3286", /* HP Laptop 15-da3001TU */
NULL
};


@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
caps = of_device_get_match_data(&pdev->dev);
if (caps->has_ddrck) {
clk = devm_clk_get(&pdev->dev, "ddrck");
clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
if (IS_ERR(clk))
return PTR_ERR(clk);
clk_prepare_enable(clk);
}
if (caps->has_mpddr_clk) {
clk = devm_clk_get(&pdev->dev, "mpddr");
clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
if (IS_ERR(clk)) {
pr_err("AT91 RAMC: couldn't get mpddr clock\n");
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
}
return 0;


@ -282,10 +282,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
if (IS_ERR(devbus->base))
return PTR_ERR(devbus->base);
clk = devm_clk_get(&pdev->dev, NULL);
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
clk_prepare_enable(clk);
/*
* Obtain clock period in picoseconds,


@ -89,6 +89,8 @@
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
#define ESDHC_TUNING_START_TAP_MASK 0x7f
#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7)
#define ESDHC_TUNING_STEP_DEFAULT 0x1
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@ -1180,7 +1182,8 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int tmp;
struct cqhci_host *cq_host = host->mmc->cqe_private;
u32 tmp;
if (esdhc_is_usdhc(imx_data)) {
/*
@ -1233,18 +1236,37 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
tmp |= ESDHC_STD_TUNING_EN |
ESDHC_TUNING_START_TAP_DEFAULT;
if (imx_data->boarddata.tuning_start_tap) {
tmp &= ~ESDHC_TUNING_START_TAP_MASK;
tmp |= ESDHC_STD_TUNING_EN;
/*
* ROM code or bootloader may config the start tap
* and step, unmask them first.
*/
tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
if (imx_data->boarddata.tuning_start_tap)
tmp |= imx_data->boarddata.tuning_start_tap;
}
else
tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
if (imx_data->boarddata.tuning_step) {
tmp &= ~ESDHC_TUNING_STEP_MASK;
tmp |= imx_data->boarddata.tuning_step
<< ESDHC_TUNING_STEP_SHIFT;
} else {
tmp |= ESDHC_TUNING_STEP_DEFAULT
<< ESDHC_TUNING_STEP_SHIFT;
}
/* Disable the CMD CRC check for tuning, if not, need to
* add some delay after every tuning command, because
* hardware standard tuning logic will directly go to next
* step once it detect the CMD CRC error, will not wait for
* the card side to finally send out the tuning data, trigger
* the buffer read ready interrupt immediately. If usdhc send
* the next tuning command some eMMC card will stuck, can't
* response, block the tuning procedure or the first command
* after the whole tuning procedure always can't get any response.
*/
tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
/*
@ -1256,6 +1278,21 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
tmp &= ~ESDHC_STD_TUNING_EN;
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
}
/*
* On i.MX8MM, we are running Dual Linux OS, with 1st Linux using SD Card
* as rootfs storage, 2nd Linux using eMMC as rootfs storage. We let the
* the 1st linux configure power/clock for the 2nd Linux.
*
* When the 2nd Linux is booting into rootfs stage, we let the 1st Linux
* to destroy the 2nd linux, then restart the 2nd linux, we met SDHCI dump.
* After we clear the pending interrupt and halt CQCTL, issue gone.
*/
if (cq_host) {
tmp = cqhci_readl(cq_host, CQHCI_IS);
cqhci_writel(cq_host, tmp, CQHCI_IS);
cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
}
}
}
@ -1571,8 +1608,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ahb_clk;
host->tuning_delay = 1;
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);


@ -682,10 +682,10 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
/* clear forwarding port */
alu_table[2] &= ~BIT(port);
alu_table[1] &= ~BIT(port);
/* if there is no port to forward, clear table */
if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
alu_table[0] = 0;
alu_table[1] = 0;
alu_table[2] = 0;


@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}
static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
/* From MAC ver 30H the TFCR is per priority, instead of per queue */
if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
return max_q_count;
else
return min_t(unsigned int, pdata->tx_q_count, max_q_count);
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
unsigned int i;
unsigned int i, q_count;
/* Clear MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
/* Clear MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
struct ieee_pfc *pfc = pdata->pfc;
struct ieee_ets *ets = pdata->ets;
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
unsigned int i;
unsigned int i, q_count;
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
}
/* Set MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);


@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
reg |= XGBE_KR_TRAINING_ENABLE;
reg |= XGBE_KR_TRAINING_START;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
pdata->kr_start_time = jiffies;
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
xgbe_switch_mode(pdata);
pdata->an_result = XGBE_AN_READY;
xgbe_an_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
{
unsigned long link_timeout;
unsigned long kr_time;
int wait;
link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
if (time_after(jiffies, link_timeout)) {
if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
pdata->phy.autoneg == AUTONEG_ENABLE) {
/* AN restart should not happen while KR training is in progress.
* The while loop ensures no AN restart during KR training,
* waits up to 500ms and AN restart is triggered only if KR
* training is failed.
*/
wait = XGBE_KR_TRAINING_WAIT_ITER;
while (wait--) {
kr_time = pdata->kr_start_time +
msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
if (time_after(jiffies, kr_time))
break;
/* AN restart is not required, if AN result is COMPLETE */
if (pdata->an_result == XGBE_AN_COMPLETE)
return;
usleep_range(10000, 11000);
}
}
netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
xgbe_phy_config_aneg(pdata);
}


@ -290,6 +290,7 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 5
#define XGBE_KR_TRAINING_WAIT_ITER 50
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
@ -1266,6 +1267,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
unsigned long kr_start_time;
enum xgbe_an_mode an_mode;
/* I2C support */


@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/prefetch.h>
@ -26,7 +27,6 @@
#include "xgene_enet_hw.h"
#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
#define ETHER_MIN_PACKET 64


@ -11195,7 +11195,7 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) {
if (tp->pcierr_recovery || !netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
@ -18187,6 +18187,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
/* Want to make sure that the reset task doesn't run */
tg3_reset_task_cancel(tp);
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
@ -18203,9 +18206,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_timer_stop(tp);
/* Want to make sure that the reset task doesn't run */
tg3_reset_task_cancel(tp);
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */


@ -1752,7 +1752,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
struct sk_buff *nskb;
u32 fcs;
@ -1766,9 +1765,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
/* FCS could be appeded to tailroom. */
if (tailroom >= ETH_FCS_LEN)
goto add_fcs;
/* FCS could be appeded by moving data to headroom. */
else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
padlen = ETH_FCS_LEN;
@ -1777,10 +1773,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen += ETH_FCS_LEN;
}
if (!cloned && headroom + tailroom >= padlen) {
(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
skb_set_tail_pointer(*skb, (*skb)->len);
} else {
if (cloned || tailroom < padlen) {
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;


@ -1640,7 +1640,7 @@ static void mlx5_core_verify_params(void)
}
}
static int __init init(void)
static int __init mlx5_init(void)
{
int err;
@ -1665,7 +1665,7 @@ err_debug:
return err;
}
static void __exit cleanup(void)
static void __exit mlx5_cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
@ -1674,5 +1674,5 @@ static void __exit cleanup(void)
mlx5_unregister_debugfs();
}
module_init(init);
module_exit(cleanup);
module_init(mlx5_init);
module_exit(mlx5_cleanup);


@ -736,14 +736,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
priv->stats[RAVB_BE].rx_over_errors++;
/* Receive Descriptor Empty int */
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF1)
priv->stats[RAVB_NC].rx_over_errors++;


@ -998,6 +998,11 @@ static int stmmac_init_phy(struct net_device *dev)
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
if (addr < 0) {
netdev_err(priv->dev, "no phy found\n");
return -ENODEV;
}
phydev = mdiobus_get_phy(priv->mii, addr);
if (!phydev) {
netdev_err(priv->dev, "no phy at addr %d\n", addr);


@ -10,10 +10,9 @@
* of their settings.
*/
#include <linux/i2c.h>
#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
#include "mdio-i2c.h"
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
* specified to be present in SFP modules. These correspond with PHY


@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@ -148,6 +149,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
{
u32 value;
int ret;
/* Enable the phy clock */
@ -161,18 +163,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
/* Initialize ephy control */
writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
PHY_CNTL1_CLK_EN |
PHY_CNTL1_CLKFREQ |
PHY_CNTL1_PHY_ENB,
priv->regs + ETH_PHY_CNTL1);
/* Make sure we get a 0 -> 1 transition on the enable bit */
value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
PHY_CNTL1_CLK_EN |
PHY_CNTL1_CLKFREQ;
writel(value, priv->regs + ETH_PHY_CNTL1);
writel(PHY_CNTL2_USE_INTERNAL |
PHY_CNTL2_SMI_SRC_MAC |
PHY_CNTL2_RX_CLK_EPHY,
priv->regs + ETH_PHY_CNTL2);
value |= PHY_CNTL1_PHY_ENB;
writel(value, priv->regs + ETH_PHY_CNTL1);
/* The phy needs a bit of time to power up */
mdelay(10);
return 0;
}


@ -11,6 +11,7 @@
#include <linux/efi.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
@ -18,7 +19,6 @@
#include <linux/prefetch.h>
#include <linux/phy.h>
#include <net/ip.h>
#include "mdio-xgene.h"
static bool xgene_mdio_status;


@ -117,7 +117,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
{
struct mdio_device *mdiodev = bus->mdio_map[addr];
struct mdio_device *mdiodev;
if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
return NULL;
mdiodev = bus->mdio_map[addr];
if (!mdiodev)
return NULL;


@ -7,6 +7,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mdio/mdio-i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "mdio-i2c.h"
#include "sfp.h"
#include "swphy.h"


@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (len > ETH_FRAME_LEN || len > skb->len)
if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
return 0;
/* the last packet of current skb */


@ -1249,9 +1249,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
free_dev:
free_netdev(dev);
undo_uhdlc_init:
iounmap(utdm->siram);
if (utdm)
iounmap(utdm->siram);
unmap_si_regs:
iounmap(utdm->si_regs);
if (utdm)
iounmap(utdm->si_regs);
free_utdm:
if (uhdlc_priv->tsa)
kfree(utdm);

Some files were not shown because too many files have changed in this diff.