This is the 5.4.249 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmSb7V4ACgkQONu9yGCS
 aT5vLxAA0yhg7h210wyMLrPNgQHrIItxkvcosoAG04WziImnvTT84XYpvthKlQrZ
 jzLGwdrH8ggdZIq+jPblmGvfvpGuM7MjKw1F8tgmviMnMyfKziGO/kIEzkNPaHSt
 sRFuGniXx2Q/m2IVblhC8pqJG6SRgkBbNgg3by7SpTRSEHBjpxaOVxvGC53Bdlkb
 ep90ox3iVbA4Q45rGCn5UfJM22wEnUYbzRv04085fzWaPDEZyHi5S6a3rHepVbrq
 7ElDQgUgHKlLm7rd1ngB8Ac+EdfavVcPok789pbEmQwf6jsAetl43yPUSEE6xFXb
 5FZAA7uUUa+E7P+140+iWBCZwQX9g+WglEkOxJV8gOMtWoiFZjpPcJxyWnvz/7ch
 XFz88WW/Ub4+bpg62TJ2F3dboeF0x1rN5kB8/ylb+Gf9vACT2gPLDbFaeG24DZEr
 s1hdsRx1Q3m8ffOYbsuTTn3bfGv8TfycV4Cwy+v+QPwJF/WPdMUnIDRY7VgWJ6fO
 scRdhkgMer9MLDrcSwxgS3tyn6JObQMp5A40H1Yb6ZVwN+q2BRC/B4Gqi6BmUNKr
 uU0BRMeyExyyQfKYCgvcf0M23qUf5L4PDpk1MX38pU+AHm8rPHlE36/pNFG4PG0g
 p6vBTlKzYeHKh12VAdPJjiWICloaz2ixf3K85xJ+vH56jXfjbSY=
 =3Pqk
 -----END PGP SIGNATURE-----

Merge 5.4.249 into android11-5.4-lts

Changes in 5.4.249
	nilfs2: reject devices with insufficient block count
	mm: rewrite wait_on_page_bit_common() logic
	list: add "list_del_init_careful()" to go with "list_empty_careful()"
	epoll: ep_autoremove_wake_function should use list_del_init_careful
	tracing: Add tracing_reset_all_online_cpus_unlocked() function
	x86/purgatory: remove PGO flags
	tick/common: Align tick period during sched_timer setup
	media: dvbdev: Fix memleak in dvb_register_device
	media: dvbdev: fix error logic at dvb_register_device()
	media: dvb-core: Fix use-after-free due to race at dvb_register_device()
	nilfs2: fix buffer corruption due to concurrent device reads
	Drivers: hv: vmbus: Fix vmbus_wait_for_unload() to scan present CPUs
	PCI: hv: Fix a race condition bug in hv_pci_query_relations()
	cgroup: Do not corrupt task iteration when rebinding subsystem
	mmc: meson-gx: remove redundant mmc_request_done() call from irq context
	ip_tunnels: allow VXLAN/GENEVE to inherit TOS/TTL from VLAN
	writeback: fix dereferencing NULL mapping->host on writeback_page_template
	nilfs2: prevent general protection fault in nilfs_clear_dirty_page()
	cifs: Clean up DFS referral cache
	cifs: Get rid of kstrdup_const()'d paths
	cifs: Introduce helpers for finding TCP connection
	cifs: Merge is_path_valid() into get_normalized_path()
	cifs: Fix potential deadlock when updating vol in cifs_reconnect()
	x86/mm: Avoid using set_pgd() outside of real PGD pages
	rcu: Upgrade rcu_swap_protected() to rcu_replace_pointer()
	ieee802154: hwsim: Fix possible memory leaks
	xfrm: Linearize the skb after offloading if needed.
	net: qca_spi: Avoid high load if QCA7000 is not available
	mmc: mtk-sd: fix deferred probing
	mmc: mvsdio: convert to devm_platform_ioremap_resource
	mmc: mvsdio: fix deferred probing
	mmc: omap: fix deferred probing
	mmc: omap_hsmmc: fix deferred probing
	mmc: sdhci-acpi: fix deferred probing
	mmc: sh_mmcif: fix deferred probing
	mmc: usdhi60rol0: fix deferred probing
	ipvs: align inner_mac_header for encapsulation
	net: dsa: mt7530: fix trapping frames on non-MT7621 SoC MT7530 switch
	be2net: Extend xmit workaround to BE3 chip
	netfilter: nf_tables: disallow element updates of bound anonymous sets
	netfilter: nfnetlink_osf: fix module autoload
	Revert "net: phy: dp83867: perform soft reset and retain established link"
	sch_netem: acquire qdisc lock in netem_change()
	scsi: target: iscsi: Prevent login threads from racing between each other
	HID: wacom: Add error check to wacom_parse_and_register()
	arm64: Add missing Set/Way CMO encodings
	media: cec: core: don't set last_initiator if tx in progress
	nfcsim.c: Fix error checking for debugfs_create_dir
	usb: gadget: udc: fix NULL dereference in remove()
	s390/cio: unregister device when the only path is gone
	ASoC: nau8824: Add quirk to active-high jack-detect
	ARM: dts: Fix erroneous ADS touchscreen polarities
	drm/exynos: vidi: fix a wrong error return
	drm/exynos: fix race condition UAF in exynos_g2d_exec_ioctl
	drm/radeon: fix race condition UAF in radeon_gem_set_domain_ioctl
	x86/apic: Fix kernel panic when booting with intremap=off and x2apic_phys
	i2c: imx-lpi2c: fix type char overflow issue when calculating the clock cycle
	mm: fix VM_BUG_ON(PageTail) and BUG_ON(PageWriteback)
	mm: make wait_on_page_writeback() wait for multiple pending writebacks
	xfs: verify buffer contents when we skip log replay
	Linux 5.4.249

Change-Id: I3f7cf3804fddac70b4c1accef1c7374b184b1ea3
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit c7f89f1b6b
Greg Kroah-Hartman, 2023-06-28 09:54:38 +00:00
72 changed files with 897 additions and 506 deletions

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 248
+SUBLEVEL = 249
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

@@ -527,7 +527,7 @@
 	interrupt-parent = <&gpio1>;
 	interrupts = <31 0>;
-	pendown-gpio = <&gpio1 31 0>;
+	pendown-gpio = <&gpio1 31 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <0x0>;

@@ -156,7 +156,7 @@
 	compatible = "ti,ads7843";
 	interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>;
 	spi-max-frequency = <3000000>;
-	pendown-gpio = <&pioC 2 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&pioC 2 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <150>;
 	ti,x-max = /bits/ 16 <3830>;

@@ -64,7 +64,7 @@
 	interrupt-parent = <&gpio2>;
 	interrupts = <7 0>;
 	spi-max-frequency = <1000000>;
-	pendown-gpio = <&gpio2 7 0>;
+	pendown-gpio = <&gpio2 7 GPIO_ACTIVE_LOW>;
 	vcc-supply = <&reg_3p3v>;
 	ti,x-min = /bits/ 16 <0>;
 	ti,x-max = /bits/ 16 <4095>;

@@ -176,7 +176,7 @@
 	pinctrl-0 = <&pinctrl_tsc2046_pendown>;
 	interrupt-parent = <&gpio2>;
 	interrupts = <29 0>;
-	pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>;
 	touchscreen-max-pressure = <255>;
 	wakeup-source;
 };

@@ -227,7 +227,7 @@
 	interrupt-parent = <&gpio2>;
 	interrupts = <25 0>; /* gpio_57 */
-	pendown-gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio2 25 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <0x0>;
 	ti,x-max = /bits/ 16 <0x0fff>;

@@ -54,7 +54,7 @@
 	interrupt-parent = <&gpio1>;
 	interrupts = <27 0>; /* gpio_27 */
-	pendown-gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio1 27 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <0x0>;
 	ti,x-max = /bits/ 16 <0x0fff>;

@@ -311,7 +311,7 @@
 	interrupt-parent = <&gpio1>;
 	interrupts = <8 0>; /* boot6 / gpio_8 */
 	spi-max-frequency = <1000000>;
-	pendown-gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>;
 	vcc-supply = <&reg_vcc3>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&tsc2048_pins>;

@@ -149,7 +149,7 @@
 	interrupt-parent = <&gpio4>;
 	interrupts = <18 0>; /* gpio_114 */
-	pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <0x0>;
 	ti,x-max = /bits/ 16 <0x0fff>;

@@ -160,7 +160,7 @@
 	interrupt-parent = <&gpio4>;
 	interrupts = <18 0>; /* gpio_114 */
-	pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <0x0>;
 	ti,x-max = /bits/ 16 <0x0fff>;

@@ -651,7 +651,7 @@
 	pinctrl-0 = <&penirq_pins>;
 	interrupt-parent = <&gpio3>;
 	interrupts = <30 IRQ_TYPE_NONE>; /* GPIO_94 */
-	pendown-gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio3 30 GPIO_ACTIVE_LOW>;
 	vcc-supply = <&vaux4>;
 	ti,x-min = /bits/ 16 <0>;

@@ -354,7 +354,7 @@
 	interrupt-parent = <&gpio1>;
 	interrupts = <15 0>; /* gpio1_wk15 */
-	pendown-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+	pendown-gpio = <&gpio1 15 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <0x0>;

@@ -102,8 +102,14 @@
 #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
 #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
+#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
 #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4)
+#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6)
 #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
+#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
+#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
 #define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
 #define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)

@@ -97,7 +97,10 @@ static void init_x2apic_ldr(void)
 static int x2apic_phys_probe(void)
 {
-	if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+	if (!x2apic_mode)
+		return 0;
+
+	if (x2apic_phys || x2apic_fadt_phys())
 		return 1;
 	return apic == &apic_x2apic_phys;

@@ -182,11 +182,11 @@ static void __meminit init_trampoline_pud(void)
 		set_p4d(p4d_tramp,
 			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));
-		set_pgd(&trampoline_pgd_entry,
-			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+		trampoline_pgd_entry =
+			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
 	} else {
-		set_pgd(&trampoline_pgd_entry,
-			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+		trampoline_pgd_entry =
+			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
 	}
 }

@@ -14,6 +14,11 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
 CFLAGS_sha256.o := -D__DISABLE_EXPORTS
+# When profile-guided optimization is enabled, llvm emits two different
+# overlapping text sections, which is not supported by kexec. Remove profile
+# optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro

@@ -1332,7 +1332,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 	/* Let the runqueue know that there is work to do. */
 	queue_work(g2d->g2d_workq, &g2d->runqueue_work);
-	if (runqueue_node->async)
+	if (req->async)
 		goto out;
 	wait_for_completion(&runqueue_node->complete);

@@ -483,8 +483,6 @@ static int vidi_remove(struct platform_device *pdev)
 	if (ctx->raw_edid != (struct edid *)fake_edid_info) {
 		kfree(ctx->raw_edid);
 		ctx->raw_edid = NULL;
-
-		return -EINVAL;
 	}
 	component_del(&pdev->dev, &vidi_component_ops);

@@ -384,7 +384,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_set_domain *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_bo *robj;
 	int r;
 	/* for now if someone requests domain CPU -
@@ -397,13 +396,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		up_read(&rdev->exclusive_lock);
 		return -ENOENT;
 	}
-	robj = gem_to_radeon_bo(gobj);
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 	drm_gem_object_put_unlocked(gobj);
 	up_read(&rdev->exclusive_lock);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }

@@ -2419,8 +2419,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 		goto fail_quirks;
 	}
-	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
+	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
 		error = hid_hw_open(hdev);
+		if (error) {
+			hid_err(hdev, "hw open failed\n");
+			goto fail_quirks;
+		}
+	}
 	wacom_set_shared_values(wacom_wac);
 	devres_close_group(&hdev->dev, wacom);

@@ -802,11 +802,22 @@ static void vmbus_wait_for_unload(void)
 	if (completion_done(&vmbus_connection.unload_event))
 		goto completed;
-	for_each_online_cpu(cpu) {
+	for_each_present_cpu(cpu) {
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
+		/*
+		 * In a CoCo VM the synic_message_page is not allocated
+		 * in hv_synic_alloc(). Instead it is set/cleared in
+		 * hv_synic_enable_regs() and hv_synic_disable_regs()
+		 * such that it is set only when the CPU is online. If
+		 * not all present CPUs are online, the message page
+		 * might be NULL, so skip such CPUs.
+		 */
 		page_addr = hv_cpu->synic_message_page;
+		if (!page_addr)
+			continue;
+
 		msg = (struct hv_message *)page_addr
 			+ VMBUS_MESSAGE_SINT;
@@ -840,11 +851,14 @@ completed:
 	 * maybe-pending messages on all CPUs to be able to receive new
 	 * messages after we reconnect.
 	 */
-	for_each_online_cpu(cpu) {
+	for_each_present_cpu(cpu) {
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
 		page_addr = hv_cpu->synic_message_page;
+		if (!page_addr)
+			continue;
+
 		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 		msg->header.message_type = HVMSG_NONE;
 	}

@@ -206,8 +206,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
 /* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
 static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
 {
-	u8 prescale, filt, sethold, clkhi, clklo, datavd;
-	unsigned int clk_rate, clk_cycle;
+	u8 prescale, filt, sethold, datavd;
+	unsigned int clk_rate, clk_cycle, clkhi, clklo;
 	enum lpi2c_imx_pincfg pincfg;
 	unsigned int temp;

@@ -1077,7 +1077,8 @@ void cec_received_msg_ts(struct cec_adapter *adap,
 	mutex_lock(&adap->lock);
 	dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
-	adap->last_initiator = 0xff;
+	if (!adap->transmit_in_progress)
+		adap->last_initiator = 0xff;
 	/* Check if this message was for us (directed or broadcast). */
 	if (!cec_msg_is_broadcast(msg))

@@ -37,6 +37,7 @@
 #include <media/tuner.h>
+static DEFINE_MUTEX(dvbdev_mutex);
+static LIST_HEAD(dvbdevfops_list);
 static int dvbdev_debug;
 module_param(dvbdev_debug, int, 0644);
@@ -462,14 +463,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 			enum dvb_device_type type, int demux_sink_pads)
 {
 	struct dvb_device *dvbdev;
-	struct file_operations *dvbdevfops;
+	struct file_operations *dvbdevfops = NULL;
+	struct dvbdevfops_node *node = NULL, *new_node = NULL;
 	struct device *clsdev;
 	int minor;
 	int id, ret;
 	mutex_lock(&dvbdev_register_lock);
-	if ((id = dvbdev_get_free_id (adap, type)) < 0){
+	if ((id = dvbdev_get_free_id (adap, type)) < 0) {
 		mutex_unlock(&dvbdev_register_lock);
 		*pdvbdev = NULL;
 		pr_err("%s: couldn't find free device id\n", __func__);
@@ -477,18 +479,45 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 	}
 	*pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
 	if (!dvbdev){
 		mutex_unlock(&dvbdev_register_lock);
 		return -ENOMEM;
 	}
-	dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+	/*
+	 * When a device of the same type is probe()d more than once,
+	 * the first allocated fops are used. This prevents memory leaks
+	 * that can occur when the same device is probe()d repeatedly.
+	 */
+	list_for_each_entry(node, &dvbdevfops_list, list_head) {
+		if (node->fops->owner == adap->module &&
+		    node->type == type &&
+		    node->template == template) {
+			dvbdevfops = node->fops;
+			break;
+		}
+	}
-	if (!dvbdevfops){
-		kfree (dvbdev);
-		mutex_unlock(&dvbdev_register_lock);
-		return -ENOMEM;
+	if (dvbdevfops == NULL) {
+		dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+		if (!dvbdevfops) {
+			kfree(dvbdev);
+			mutex_unlock(&dvbdev_register_lock);
+			return -ENOMEM;
+		}
+		new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
+		if (!new_node) {
+			kfree(dvbdevfops);
+			kfree(dvbdev);
+			mutex_unlock(&dvbdev_register_lock);
+			return -ENOMEM;
+		}
+		new_node->fops = dvbdevfops;
+		new_node->type = type;
+		new_node->template = template;
+		list_add_tail (&new_node->list_head, &dvbdevfops_list);
 	}
 	memcpy(dvbdev, template, sizeof(struct dvb_device));
@@ -499,19 +528,20 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 	dvbdev->priv = priv;
 	dvbdev->fops = dvbdevfops;
 	init_waitqueue_head (&dvbdev->wait_queue);
 	dvbdevfops->owner = adap->module;
 	list_add_tail (&dvbdev->list_head, &adap->device_list);
 	down_write(&minor_rwsem);
 #ifdef CONFIG_DVB_DYNAMIC_MINORS
 	for (minor = 0; minor < MAX_DVB_MINORS; minor++)
 		if (dvb_minors[minor] == NULL)
 			break;
 	if (minor == MAX_DVB_MINORS) {
-		kfree(dvbdevfops);
+		if (new_node) {
+			list_del (&new_node->list_head);
+			kfree(dvbdevfops);
+			kfree(new_node);
+		}
+		list_del (&dvbdev->list_head);
 		kfree(dvbdev);
 		up_write(&minor_rwsem);
 		mutex_unlock(&dvbdev_register_lock);
@@ -520,36 +550,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 #else
 	minor = nums2minor(adap->num, type, id);
 #endif
 	dvbdev->minor = minor;
 	dvb_minors[minor] = dvb_device_get(dvbdev);
 	up_write(&minor_rwsem);
 	ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
 	if (ret) {
 		pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
 		      __func__);
+		if (new_node) {
+			list_del (&new_node->list_head);
+			kfree(dvbdevfops);
+			kfree(new_node);
+		}
 		dvb_media_device_free(dvbdev);
-		kfree(dvbdevfops);
+		list_del (&dvbdev->list_head);
 		kfree(dvbdev);
 		mutex_unlock(&dvbdev_register_lock);
 		return ret;
 	}
-	mutex_unlock(&dvbdev_register_lock);
 	clsdev = device_create(dvb_class, adap->device,
 			       MKDEV(DVB_MAJOR, minor),
 			       dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
 	if (IS_ERR(clsdev)) {
 		pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
 		       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+		if (new_node) {
+			list_del (&new_node->list_head);
+			kfree(dvbdevfops);
+			kfree(new_node);
+		}
+		dvb_media_device_free(dvbdev);
+		list_del (&dvbdev->list_head);
+		kfree(dvbdev);
+		mutex_unlock(&dvbdev_register_lock);
 		return PTR_ERR(clsdev);
 	}
 	dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
 		adap->num, dnames[type], id, minor, minor);
+	mutex_unlock(&dvbdev_register_lock);
 	return 0;
 }
 EXPORT_SYMBOL(dvb_register_device);
@@ -578,7 +619,6 @@ static void dvb_free_device(struct kref *ref)
 {
 	struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
-	kfree (dvbdev->fops);
 	kfree (dvbdev);
 }
@@ -1084,9 +1124,17 @@ error:
 static void __exit exit_dvbdev(void)
 {
+	struct dvbdevfops_node *node, *next;
+
 	class_destroy(dvb_class);
 	cdev_del(&dvb_device_cdev);
 	unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
+	list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
+		list_del (&node->list_head);
+		kfree(node->fops);
+		kfree(node);
+	}
 }
 subsys_initcall(init_dvbdev);
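Aside: the heart of the dvbdev memleak fix above is a lookup-or-create registry keyed on (owner, type, template), so repeated probe()s of the same device type reuse the fops allocated the first time instead of leaking a fresh kmemdup() copy. A minimal standalone sketch of that pattern in plain C (hypothetical names, not the kernel API):

/* lookup-or-create: reuse a registered node when the key matches,
 * allocate and register it exactly once otherwise */
#include <stdlib.h>

struct fops_node {
	const void *owner;
	int type;
	const void *template;
	struct fops_node *next;
};

static struct fops_node *registry;

static struct fops_node *get_or_create(const void *owner, int type,
				       const void *template)
{
	struct fops_node *n;

	for (n = registry; n; n = n->next)	/* reuse the first match */
		if (n->owner == owner && n->type == type &&
		    n->template == template)
			return n;

	n = calloc(1, sizeof(*n));		/* first registration */
	if (!n)
		return NULL;
	n->owner = owner;
	n->type = type;
	n->template = template;
	n->next = registry;
	registry = n;
	return n;
}

Freeing then happens once, at teardown, by walking the registry — exactly what the new exit_dvbdev() loop does.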

@@ -973,11 +973,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
 	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
 		if (data && !cmd->error)
 			data->bytes_xfered = data->blksz * data->blocks;
-		if (meson_mmc_bounce_buf_read(data) ||
-		    meson_mmc_get_next_command(cmd))
-			ret = IRQ_WAKE_THREAD;
-		else
-			ret = IRQ_HANDLED;
+
+		return IRQ_WAKE_THREAD;
 	}
 out:
@@ -989,9 +986,6 @@ out:
 		writel(start, host->regs + SD_EMMC_START);
 	}
-	if (ret == IRQ_HANDLED)
-		meson_mmc_request_done(host->mmc, cmd->mrq);
 	return ret;
 }

@@ -2249,7 +2249,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
 	host->irq = platform_get_irq(pdev, 0);
 	if (host->irq < 0) {
-		ret = -EINVAL;
+		ret = host->irq;
 		goto host_free;
 	}

@@ -696,17 +696,15 @@ static int mvsd_probe(struct platform_device *pdev)
 	struct mmc_host *mmc = NULL;
 	struct mvsd_host *host = NULL;
 	const struct mbus_dram_target_info *dram;
-	struct resource *r;
 	int ret, irq;
 	if (!np) {
 		dev_err(&pdev->dev, "no DT node\n");
 		return -ENODEV;
 	}
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!r || irq < 0)
-		return -ENXIO;
+	if (irq < 0)
+		return irq;
 	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
 	if (!mmc) {
@@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev)
 	spin_lock_init(&host->lock);
-	host->base = devm_ioremap_resource(&pdev->dev, r);
+	host->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(host->base)) {
 		ret = PTR_ERR(host->base);
 		goto out;

@@ -1344,7 +1344,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
-		return -ENXIO;
+		return irq;
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	host->virt_base = devm_ioremap_resource(&pdev->dev, res);

@@ -1843,9 +1843,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 	}
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	irq = platform_get_irq(pdev, 0);
-	if (res == NULL || irq < 0)
+	if (!res)
 		return -ENXIO;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))

@@ -835,7 +835,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
 	host->ops	= &sdhci_acpi_ops_dflt;
 	host->irq	= platform_get_irq(pdev, 0);
 	if (host->irq < 0) {
-		err = -EINVAL;
+		err = host->irq;
 		goto err_free;
 	}

@@ -1395,7 +1395,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
 	irq[0] = platform_get_irq(pdev, 0);
 	irq[1] = platform_get_irq_optional(pdev, 1);
 	if (irq[0] < 0)
-		return -ENXIO;
+		return irq[0];
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	reg = devm_ioremap_resource(dev, res);

@@ -1743,8 +1743,10 @@ static int usdhi6_probe(struct platform_device *pdev)
 	irq_cd = platform_get_irq_byname(pdev, "card detect");
 	irq_sd = platform_get_irq_byname(pdev, "data");
 	irq_sdio = platform_get_irq_byname(pdev, "SDIO");
-	if (irq_sd < 0 || irq_sdio < 0)
-		return -ENODEV;
+	if (irq_sd < 0)
+		return irq_sd;
+	if (irq_sdio < 0)
+		return irq_sdio;
 	mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
 	if (!mmc)

@@ -644,7 +644,7 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
 	mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
 	/* Set CPU port number */
-	if (priv->id == ID_MT7621)
+	if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
 		mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
 	/* CPU port gets connected to all user ports of

@@ -1137,8 +1137,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
 	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
 			VLAN_ETH_HLEN : ETH_HLEN;
 	if (skb->len <= 60 &&
-	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
-	    is_ipv4_pkt(skb)) {
+	    (lancer_chip(adapter) || BE3_chip(adapter) ||
+	     skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
 		ip = (struct iphdr *)ip_hdr(skb);
 		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
 	}

@@ -574,8 +574,7 @@ qcaspi_spi_thread(void *data)
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if ((qca->intr_req == qca->intr_svc) &&
-		    (qca->txr.skb[qca->txr.head] == NULL) &&
-		    (qca->sync == QCASPI_SYNC_READY))
+		    !qca->txr.skb[qca->txr.head])
 			schedule();
 		set_current_state(TASK_RUNNING);

@@ -522,7 +522,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
 static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
 {
 	struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
-	struct hwsim_edge_info *einfo;
+	struct hwsim_edge_info *einfo, *einfo_old;
 	struct hwsim_phy *phy_v0;
 	struct hwsim_edge *e;
 	u32 v0, v1;
@@ -560,8 +560,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
 	list_for_each_entry_rcu(e, &phy_v0->edges, list) {
 		if (e->endpoint->idx == v1) {
 			einfo->lqi = lqi;
-			rcu_assign_pointer(e->info, einfo);
+			einfo_old = rcu_replace_pointer(e->info, einfo,
+							lockdep_is_held(&hwsim_phys_lock));
 			rcu_read_unlock();
+			kfree_rcu(einfo_old, rcu);
 			mutex_unlock(&hwsim_phys_lock);
 			return 0;
 		}

@@ -476,7 +476,7 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 {
 	int err;
-	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
 	if (err < 0)
 		return err;

@@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root;
 static void nfcsim_debugfs_init(void)
 {
 	nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
-
-	if (!nfcsim_debugfs_root)
-		pr_err("Could not create debugfs entry\n");
 }
 static void nfcsim_debugfs_remove(void)

@@ -2739,6 +2739,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)
 	if (!ret)
 		ret = wait_for_response(hdev, &comp);
+	/*
+	 * In the case of fast device addition/removal, it's possible that
+	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
+	 * already got a PCI_BUS_RELATIONS* message from the host and the
+	 * channel callback already scheduled a work to hbus->wq, which can be
+	 * running pci_devices_present_work() -> survey_child_resources() ->
+	 * complete(&hbus->survey_event), even after hv_pci_query_relations()
+	 * exits and the stack variable 'comp' is no longer valid; as a result,
+	 * a hang or a page fault may happen when the complete() calls
+	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
+	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
+	 * -ENODEV, there can't be any more work item scheduled to hbus->wq
+	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
+	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
+	 * channel->rescind = true.
+	 */
+	flush_workqueue(hbus->wq);
 	return ret;
 }

@@ -1385,6 +1385,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
 enum io_sch_action {
 	IO_SCH_UNREG,
 	IO_SCH_ORPH_UNREG,
+	IO_SCH_UNREG_CDEV,
 	IO_SCH_ATTACH,
 	IO_SCH_UNREG_ATTACH,
 	IO_SCH_ORPH_ATTACH,
@@ -1417,7 +1418,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
 	}
 	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
 		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
-			return IO_SCH_UNREG;
+			return IO_SCH_UNREG_CDEV;
 		return IO_SCH_DISC;
 	}
 	if (device_is_disconnected(cdev))
@@ -1479,6 +1480,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 	case IO_SCH_ORPH_ATTACH:
 		ccw_device_set_disconnected(cdev);
 		break;
+	case IO_SCH_UNREG_CDEV:
 	case IO_SCH_UNREG_ATTACH:
 	case IO_SCH_UNREG:
 		if (!cdev)
@@ -1512,6 +1514,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 		if (rc)
 			goto out;
 		break;
+	case IO_SCH_UNREG_CDEV:
 	case IO_SCH_UNREG_ATTACH:
 		spin_lock_irqsave(sch->lock, flags);
 		if (cdev->private->flags.resuming) {

@@ -1053,6 +1053,7 @@ int iscsi_target_locate_portal(
 	iscsi_target_set_sock_callbacks(conn);
 	login->np = np;
+	conn->tpg = NULL;
 	login_req = (struct iscsi_login_req *) login->req;
 	payload_length = ntoh24(login_req->dlength);
@@ -1122,7 +1123,6 @@ int iscsi_target_locate_portal(
 	 */
 	sessiontype = strncmp(s_buf, DISCOVERY, 9);
 	if (!sessiontype) {
-		conn->tpg = iscsit_global->discovery_tpg;
 		if (!login->leading_connection)
 			goto get_target;
@@ -1139,9 +1139,11 @@ int iscsi_target_locate_portal(
 	 * Serialize access across the discovery struct iscsi_portal_group to
 	 * process login attempt.
 	 */
+	conn->tpg = iscsit_global->discovery_tpg;
 	if (iscsit_access_np(np, conn->tpg) < 0) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				    ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		conn->tpg = NULL;
 		ret = -1;
 		goto out;
 	}

@@ -171,6 +171,9 @@ static int udc_pci_probe(
 		retval = -ENODEV;
 		goto err_probe;
 	}
+
+	udc = dev;
+
 	return 0;
 err_probe:

(One file's diff was suppressed because it is too large.)

@@ -1815,7 +1815,11 @@ static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
 {
 	int ret = default_wake_function(wq_entry, mode, sync, key);
-	list_del_init(&wq_entry->entry);
+	/*
+	 * Pairs with list_empty_careful in ep_poll, and ensures future loop
+	 * iterations see the cause of this wakeup.
+	 */
+	list_del_init_careful(&wq_entry->entry);
 	return ret;
 }

@@ -370,7 +370,15 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
 			struct page *page = pvec.pages[i];
 			lock_page(page);
-			nilfs_clear_dirty_page(page, silent);
+
+			/*
+			 * This page may have been removed from the address
+			 * space by truncation or invalidation when the lock
+			 * was acquired. Skip processing in that case.
+			 */
+			if (likely(page->mapping == mapping))
+				nilfs_clear_dirty_page(page, silent);
+
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);

@@ -101,6 +101,12 @@ int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
 	if (unlikely(!bh))
 		return -ENOMEM;
+	lock_buffer(bh);
+	if (!buffer_uptodate(bh)) {
+		memset(bh->b_data, 0, bh->b_size);
+		set_buffer_uptodate(bh);
+	}
+	unlock_buffer(bh);
 	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
 	return 0;
 }

@@ -984,10 +984,13 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 	unsigned int isz, srsz;
 	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
+
+	lock_buffer(bh_sr);
 	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
 	isz = nilfs->ns_inode_size;
 	srsz = NILFS_SR_BYTES(isz);
+
+	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
 	raw_sr->sr_bytes = cpu_to_le16(srsz);
 	raw_sr->sr_nongc_ctime
 		= cpu_to_le64(nilfs_doing_gc() ?
@@ -1001,6 +1004,8 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
 				 NILFS_SR_SUFILE_OFFSET(isz), 1);
 	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
+	set_buffer_uptodate(bh_sr);
+	unlock_buffer(bh_sr);
 }
 static void nilfs_redirty_inodes(struct list_head *head)
@@ -1778,6 +1783,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 	list_for_each_entry(segbuf, logs, sb_list) {
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
+			clear_buffer_uptodate(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page)
 					end_page_writeback(bd_page);
@@ -1789,6 +1795,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 				    b_assoc_buffers) {
 			clear_buffer_async_write(bh);
 			if (bh == segbuf->sb_super_root) {
+				clear_buffer_uptodate(bh);
 				if (bh->b_page != bd_page) {
 					end_page_writeback(bd_page);
 					bd_page = bh->b_page;

@@ -367,10 +367,31 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
 		goto out;
 	}
 	nsbp = (void *)nsbh->b_data + offset;
-	memset(nsbp, 0, nilfs->ns_blocksize);
+
+	lock_buffer(nsbh);
+	if (sb2i >= 0) {
+		/*
+		 * The position of the second superblock only changes by 4KiB,
+		 * which is larger than the maximum superblock data size
+		 * (= 1KiB), so there is no need to use memmove() to allow
+		 * overlap between source and destination.
+		 */
+		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
+
+		/*
+		 * Zero fill after copy to avoid overwriting in case of move
+		 * within the same block.
+		 */
+		memset(nsbh->b_data, 0, offset);
+		memset((void *)nsbp + nilfs->ns_sbsize, 0,
+		       nsbh->b_size - offset - nilfs->ns_sbsize);
+	} else {
+		memset(nsbh->b_data, 0, nsbh->b_size);
+	}
+	set_buffer_uptodate(nsbh);
+	unlock_buffer(nsbh);
 	if (sb2i >= 0) {
-		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
 		brelse(nilfs->ns_sbh[sb2i]);
 		nilfs->ns_sbh[sb2i] = nsbh;
 		nilfs->ns_sbp[sb2i] = nsbp;

@@ -375,6 +375,18 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
 			  100));
 }
+/**
+ * nilfs_max_segment_count - calculate the maximum number of segments
+ * @nilfs: nilfs object
+ */
+static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
+{
+	u64 max_count = U64_MAX;
+
+	do_div(max_count, nilfs->ns_blocks_per_segment);
+	return min_t(u64, max_count, ULONG_MAX);
+}
+
 void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
 {
 	nilfs->ns_nsegments = nsegs;
@@ -384,6 +396,8 @@ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
 static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 				   struct nilfs_super_block *sbp)
 {
+	u64 nsegments, nblocks;
+
 	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
 		nilfs_msg(nilfs->ns_sb, KERN_ERR,
 			  "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
@@ -430,7 +444,35 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 		return -EINVAL;
 	}
-	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
+	nsegments = le64_to_cpu(sbp->s_nsegments);
+	if (nsegments > nilfs_max_segment_count(nilfs)) {
+		nilfs_msg(nilfs->ns_sb, KERN_ERR,
+			  "segment count %llu exceeds upper limit (%llu segments)",
+			  (unsigned long long)nsegments,
+			  (unsigned long long)nilfs_max_segment_count(nilfs));
+		return -EINVAL;
+	}
+
+	nblocks = (u64)i_size_read(nilfs->ns_sb->s_bdev->bd_inode) >>
+		  nilfs->ns_sb->s_blocksize_bits;
+	if (nblocks) {
+		u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
+		/*
+		 * To avoid failing to mount early device images without a
+		 * second superblock, exclude that block count from the
+		 * "min_block_count" calculation.
+		 */
+		if (nblocks < min_block_count) {
+			nilfs_msg(nilfs->ns_sb, KERN_ERR,
+				  "total number of segment blocks %llu exceeds device size (%llu blocks)",
+				  (unsigned long long)min_block_count,
+				  (unsigned long long)nblocks);
+			return -EINVAL;
+		}
+	}
+
+	nilfs_set_nsegments(nilfs, nsegments);
 	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
 	return 0;
 }
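The two checks added above are overflow-safe bounds arithmetic: the segment count is first capped so that nsegments * blocks_per_segment cannot wrap a 64-bit value, and only then is the product compared against the device size. A standalone demo of the same arithmetic with made-up example numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t blocks_per_segment = 2048;	/* example value */
	const uint64_t nsegments = 1000000;		/* from the superblock */
	const uint64_t device_blocks = 3000000000ULL;	/* device size in blocks */

	uint64_t max_segments = UINT64_MAX / blocks_per_segment;
	if (nsegments > max_segments) {
		fprintf(stderr, "segment count exceeds upper limit\n");
		return 1;
	}

	/* cannot overflow: nsegments <= UINT64_MAX / blocks_per_segment */
	uint64_t min_block_count = nsegments * blocks_per_segment;
	if (device_blocks && device_blocks < min_block_count) {
		fprintf(stderr, "segment blocks exceed device size\n");
		return 1;
	}

	printf("layout ok: needs %llu of %llu blocks\n",
	       (unsigned long long)min_block_count,
	       (unsigned long long)device_blocks);
	return 0;
}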

@@ -2783,6 +2783,16 @@ xlog_recover_buffer_pass2(
 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
 		trace_xfs_log_recover_buf_skip(log, buf_f);
 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+
+		/*
+		 * We're skipping replay of this buffer log item due to the log
+		 * item LSN being behind the ondisk buffer. Verify the buffer
+		 * contents since we aren't going to run the write verifier.
+		 */
+		if (bp->b_ops) {
+			bp->b_ops->verify_read(bp);
+			error = bp->b_error;
+		}
+
 		goto out_release;
 	}

@@ -268,6 +268,24 @@ static inline int list_empty(const struct list_head *head)
 	return READ_ONCE(head->next) == head;
 }
+/**
+ * list_del_init_careful - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+	__list_del_entry(entry);
+	entry->prev = entry;
+	smp_store_release(&entry->next, entry);
+}
+
 /**
  * list_empty_careful - tests whether a list is empty and not being modified
  * @head: the list to test
@@ -283,7 +301,7 @@ static inline int list_empty(const struct list_head *head)
  */
 static inline int list_empty_careful(const struct list_head *head)
 {
-	struct list_head *next = head->next;
+	struct list_head *next = smp_load_acquire(&head->next);
 	return (next == head) && (next == head->prev);
 }
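The ordering guarantee documented above comes from the release store on entry->next in list_del_init_careful() pairing with the acquire load in list_empty_careful(). A userspace analogue of just that pairing, written with C11 atomics instead of the kernel's smp_* primitives (the actual list unlinking is elided):

#include <stdatomic.h>
#include <stdbool.h>

struct node {
	struct node *prev;
	_Atomic(struct node *) next;
};

/* waker side: everything written before this release store ... */
static void del_init_careful(struct node *entry)
{
	/* __list_del_entry() prev/next fixups elided in this sketch */
	entry->prev = entry;
	atomic_store_explicit(&entry->next, entry, memory_order_release);
}

/* ... is visible to a waiter whose acquire load sees the entry empty */
static bool empty_careful(struct node *head)
{
	struct node *next = atomic_load_explicit(&head->next,
						 memory_order_acquire);

	return next == head && next == head->prev;
}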

@@ -384,6 +384,24 @@ do { \
 	smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
 } while (0)
+/**
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
+ * @ptr: regular pointer
+ * @c: the lockdep conditions under which the dereference will take place
+ *
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer. The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
+ */
+#define rcu_replace_pointer(rcu_ptr, ptr, c)				\
+({									\
+	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
+	rcu_assign_pointer((rcu_ptr), (ptr));				\
+	__tmp;								\
+})
+
 /**
  * rcu_swap_protected() - swap an RCU and a regular pointer
  * @rcu_ptr: RCU pointer
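Modulo the lockdep annotation, rcu_replace_pointer() is a protected read followed by a release-ordered publish of the new value. A userspace model of what the macro expands to, assuming the caller holds the update-side lock (as the hwsim conversion above does):

#include <stdatomic.h>

static void *replace_pointer_model(_Atomic(void *) *rcu_ptr, void *newp)
{
	/* rcu_dereference_protected(): plain read, legal under the lock */
	void *old = atomic_load_explicit(rcu_ptr, memory_order_relaxed);

	/* rcu_assign_pointer(): release store publishing the new value */
	atomic_store_explicit(rcu_ptr, newp, memory_order_release);

	return old;	/* caller typically hands this to kfree_rcu() */
}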

@@ -189,6 +189,21 @@ struct dvb_device {
 	void *priv;
 };
+/**
+ * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list
+ *
+ * @fops: Dynamically allocated fops for ->owner registration
+ * @type: type of dvb_device
+ * @template: dvb_device used for registration
+ * @list_head: list_head for dvbdevfops_list
+ */
+struct dvbdevfops_node {
+	struct file_operations *fops;
+	enum dvb_device_type type;
+	const struct dvb_device *template;
+	struct list_head list_head;
+};
+
 /**
  * dvb_device_get - Increase dvb_device reference
 *

@@ -374,9 +374,11 @@ static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
 static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
 				       const struct sk_buff *skb)
 {
-	if (skb->protocol == htons(ETH_P_IP))
+	__be16 payload_protocol = skb_protocol(skb, true);
+
+	if (payload_protocol == htons(ETH_P_IP))
 		return iph->tos;
-	else if (skb->protocol == htons(ETH_P_IPV6))
+	else if (payload_protocol == htons(ETH_P_IPV6))
 		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
 	else
 		return 0;
@@ -385,9 +387,11 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
 static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
 				   const struct sk_buff *skb)
 {
-	if (skb->protocol == htons(ETH_P_IP))
+	__be16 payload_protocol = skb_protocol(skb, true);
+
+	if (payload_protocol == htons(ETH_P_IP))
 		return iph->ttl;
-	else if (skb->protocol == htons(ETH_P_IPV6))
+	else if (payload_protocol == htons(ETH_P_IPV6))
 		return ((const struct ipv6hdr *)iph)->hop_limit;
 	else
 		return 0;
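skb_protocol(skb, true) differs from skb->protocol in that it looks through 802.1Q/802.1ad tags to the encapsulated ethertype, which is what lets a VXLAN/GENEVE tunnel carried over a VLAN inherit the inner TOS/TTL. A rough userspace sketch of that "step over the tags" idea (illustrative only, not the kernel helper):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

static uint16_t payload_ethertype(const uint8_t *frame, size_t len)
{
	size_t off = 12;	/* skip destination + source MAC */

	while (off + 2 <= len) {
		uint16_t be, proto;

		memcpy(&be, frame + off, 2);
		proto = ntohs(be);
		if (proto != ETH_P_8021Q && proto != ETH_P_8021AD)
			return proto;	/* the real payload protocol */
		off += 4;		/* skip TPID + TCI of one VLAN tag */
	}
	return 0;
}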

@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(writeback_page_template,
 		strscpy_pad(__entry->name,
 			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
 					 NULL), 32);
-		__entry->ino = mapping ? mapping->host->i_ino : 0;
+		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
 		__entry->index = page->index;
 	),

@@ -1728,7 +1728,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 {
 	struct cgroup *dcgrp = &dst_root->cgrp;
 	struct cgroup_subsys *ss;
-	int ssid, i, ret;
+	int ssid, ret;
 	u16 dfl_disable_ss_mask = 0;
 	lockdep_assert_held(&cgroup_mutex);
@@ -1772,7 +1772,8 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 		struct cgroup_root *src_root = ss->root;
 		struct cgroup *scgrp = &src_root->cgrp;
 		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
-		struct css_set *cset;
+		struct css_set *cset, *cset_pos;
+		struct css_task_iter *it;
 		WARN_ON(!css || cgroup_css(dcgrp, ss));
@@ -1790,9 +1791,22 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 		css->cgroup = dcgrp;
 		spin_lock_irq(&css_set_lock);
-		hash_for_each(css_set_table, i, cset, hlist)
-			list_move_tail(&cset->e_cset_node[ss->id],
-				       &dcgrp->e_csets[ss->id]);
+		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+					 e_cset_node[ss->id]) {
+			list_move_tail(&cset->e_cset_node[ss->id],
+				       &dcgrp->e_csets[ss->id]);
+			/*
+			 * all css_sets of scgrp together in same order to dcgrp,
+			 * patch in-flight iterators to preserve correct iteration.
+			 * since the iterator is always advanced right away and
+			 * finished when it->cset_pos meets it->cset_head, so only
+			 * update it->cset_head is enough here.
+			 */
+			list_for_each_entry(it, &cset->task_iters, iters_node)
+				if (it->cset_head == &scgrp->e_csets[ss->id])
+					it->cset_head = &dcgrp->e_csets[ss->id];
+		}
 		spin_unlock_irq(&css_set_lock);
 		/* default hierarchy doesn't enable controllers by default */

@@ -384,7 +384,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
 	int ret = default_wake_function(wq_entry, mode, sync, key);
 	if (ret)
-		list_del_init(&wq_entry->entry);
+		list_del_init_careful(&wq_entry->entry);
 	return ret;
 }

@@ -216,19 +216,8 @@ static void tick_setup_device(struct tick_device *td,
 	 * this cpu:
 	 */
 	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
-		ktime_t next_p;
-		u32 rem;
-
 		tick_do_timer_cpu = cpu;
-
-		next_p = ktime_get();
-		div_u64_rem(next_p, TICK_NSEC, &rem);
-		if (rem) {
-			next_p -= rem;
-			next_p += TICK_NSEC;
-		}
-
-		tick_next_period = next_p;
+		tick_next_period = ktime_get();
 #ifdef CONFIG_NO_HZ_FULL
 		/*
 		 * The boot CPU may be nohz_full, in which case set

@@ -129,8 +129,19 @@ static ktime_t tick_init_jiffy_update(void)
 	raw_spin_lock(&jiffies_lock);
 	write_seqcount_begin(&jiffies_seq);
 	/* Did we start the jiffies update yet ? */
-	if (last_jiffies_update == 0)
+	if (last_jiffies_update == 0) {
+		u32 rem;
+
+		/*
+		 * Ensure that the tick is aligned to a multiple of
+		 * TICK_NSEC.
+		 */
+		div_u64_rem(tick_next_period, TICK_NSEC, &rem);
+		if (rem)
+			tick_next_period += TICK_NSEC - rem;
+
 		last_jiffies_update = tick_next_period;
+	}
 	period = last_jiffies_update;
 	write_seqcount_end(&jiffies_seq);
 	raw_spin_unlock(&jiffies_lock);
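The alignment that moved into tick_init_jiffy_update() is plain round-up arithmetic: if the first period does not fall on a TICK_NSEC boundary, push it forward to the next multiple. The same computation as a standalone demo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t tick_nsec = 4000000;		/* 250 HZ example */
	uint64_t next_period = 1234567891234ULL;	/* arbitrary ktime_get() value */

	uint64_t rem = next_period % tick_nsec;		/* div_u64_rem() */
	if (rem)
		next_period += tick_nsec - rem;

	printf("aligned to %llu, multiple of tick: %s\n",
	       (unsigned long long)next_period,
	       next_period % tick_nsec == 0 ? "yes" : "no");
	return 0;
}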

@@ -1931,10 +1931,12 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
 }
 /* Must have trace_types_lock held */
-void tracing_reset_all_online_cpus(void)
+void tracing_reset_all_online_cpus_unlocked(void)
 {
 	struct trace_array *tr;
+	lockdep_assert_held(&trace_types_lock);
+
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 		if (!tr->clear_trace)
 			continue;
@@ -1946,6 +1948,13 @@ void tracing_reset_all_online_cpus(void)
 	}
 }
+void tracing_reset_all_online_cpus(void)
+{
+	mutex_lock(&trace_types_lock);
+	tracing_reset_all_online_cpus_unlocked();
+	mutex_unlock(&trace_types_lock);
+}
+
 /*
  * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
  * is the tgid last observed corresponding to pid=i.

@@ -677,6 +677,7 @@ int tracing_is_enabled(void);
 void tracing_reset_online_cpus(struct trace_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
+void tracing_reset_all_online_cpus_unlocked(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 bool tracing_is_disabled(void);

@@ -2440,7 +2440,7 @@ static void trace_module_remove_events(struct module *mod)
 	 * over from this module may be passed to the new module events and
 	 * unexpected results may occur.
 	 */
-	tracing_reset_all_online_cpus();
+	tracing_reset_all_online_cpus_unlocked();
 }
 static int trace_module_notify(struct notifier_block *self,

@@ -1046,6 +1046,7 @@ struct wait_page_queue {
 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
+	int ret;
 	struct wait_page_key *key = arg;
 	struct wait_page_queue *wait_page
 		= container_of(wait, struct wait_page_queue, wait);
@@ -1058,17 +1059,35 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 		return 0;
 	/*
-	 * Stop walking if it's locked.
-	 * Is this safe if put_and_wait_on_page_locked() is in use?
-	 * Yes: the waker must hold a reference to this page, and if PG_locked
-	 * has now already been set by another task, that task must also hold
-	 * a reference to the *same usage* of this page; so there is no need
-	 * to walk on to wake even the put_and_wait_on_page_locked() callers.
+	 * If it's an exclusive wait, we get the bit for it, and
+	 * stop walking if we can't.
+	 *
+	 * If it's a non-exclusive wait, then the fact that this
+	 * wake function was called means that the bit already
+	 * was cleared, and we don't care if somebody then
+	 * re-took it.
 	 */
-	if (test_bit(key->bit_nr, &key->page->flags))
-		return -1;
+	ret = 0;
+	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
+		if (test_and_set_bit(key->bit_nr, &key->page->flags))
+			return -1;
+		ret = 1;
+	}
+	wait->flags |= WQ_FLAG_WOKEN;
-	return autoremove_wake_function(wait, mode, sync, key);
+	wake_up_state(wait->private, mode);
+
+	/*
+	 * Ok, we have successfully done what we're waiting for,
+	 * and we can unconditionally remove the wait entry.
+	 *
+	 * Note that this has to be the absolute last thing we do,
+	 * since after list_del_init(&wait->entry) the wait entry
+	 * might be de-allocated and the process might even have
+	 * exited.
+	 */
+	list_del_init_careful(&wait->entry);
+	return ret;
 }
 static void wake_up_page_bit(struct page *page, int bit_nr)
@@ -1147,16 +1166,31 @@ enum behavior {
 	 */
 };
+/*
+ * Attempt to check (or get) the page bit, and mark the
+ * waiter woken if successful.
+ */
+static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
+					struct wait_queue_entry *wait)
+{
+	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
+		if (test_and_set_bit(bit_nr, &page->flags))
+			return false;
+	} else if (test_bit(bit_nr, &page->flags))
+		return false;
+
+	wait->flags |= WQ_FLAG_WOKEN;
+	return true;
+}
 static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	struct page *page, int bit_nr, int state, enum behavior behavior)
 {
 	struct wait_page_queue wait_page;
 	wait_queue_entry_t *wait = &wait_page.wait;
-	bool bit_is_set;
 	bool thrashing = false;
 	bool delayacct = false;
 	unsigned long pflags;
-	int ret = 0;
 	if (bit_nr == PG_locked &&
 	    !PageUptodate(page) && PageWorkingset(page)) {
@@ -1174,48 +1208,47 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	wait_page.page = page;
 	wait_page.bit_nr = bit_nr;
+	/*
+	 * Do one last check whether we can get the
+	 * page bit synchronously.
+	 *
+	 * Do the SetPageWaiters() marking before that
+	 * to let any waker we _just_ missed know they
+	 * need to wake us up (otherwise they'll never
+	 * even go to the slow case that looks at the
+	 * page queue), and add ourselves to the wait
+	 * queue if we need to sleep.
+	 *
+	 * This part needs to be done under the queue
+	 * lock to avoid races.
+	 */
+	spin_lock_irq(&q->lock);
+	SetPageWaiters(page);
+	if (!trylock_page_bit_common(page, bit_nr, wait))
+		__add_wait_queue_entry_tail(q, wait);
+	spin_unlock_irq(&q->lock);
+
+	/*
+	 * From now on, all the logic will be based on
+	 * the WQ_FLAG_WOKEN flag, and the and the page
+	 * bit testing (and setting) will be - or has
+	 * already been - done by the wake function.
+	 *
+	 * We can drop our reference to the page.
+	 */
+	if (behavior == DROP)
+		put_page(page);
 	for (;;) {
-		spin_lock_irq(&q->lock);
-
-		if (likely(list_empty(&wait->entry))) {
-			__add_wait_queue_entry_tail(q, wait);
-			SetPageWaiters(page);
-		}
 		set_current_state(state);
-		spin_unlock_irq(&q->lock);
-
-		bit_is_set = test_bit(bit_nr, &page->flags);
-		if (behavior == DROP)
-			put_page(page);
-
-		if (likely(bit_is_set))
-			io_schedule();
-
-		if (behavior == EXCLUSIVE) {
-			if (!test_and_set_bit_lock(bit_nr, &page->flags))
-				break;
-		} else if (behavior == SHARED) {
-			if (!test_bit(bit_nr, &page->flags))
-				break;
-		}
-		if (signal_pending_state(state, current)) {
-			ret = -EINTR;
+		if (signal_pending_state(state, current))
 			break;
-		}
-		if (behavior == DROP) {
-			/*
-			 * We can no longer safely access page->flags:
-			 * even if CONFIG_MEMORY_HOTREMOVE is not enabled,
-			 * there is a risk of waiting forever on a page reused
-			 * for something that keeps it locked indefinitely.
-			 * But best check for -EINTR above before breaking.
-			 */
+		if (wait->flags & WQ_FLAG_WOKEN)
 			break;
-		}
+
+		io_schedule();
 	}
 	finish_wait(q, wait);
@@ -1234,7 +1267,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	 * bother with signals either.
 	 */
-	return ret;
+	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
 }
 void wait_on_page_bit(struct page *page, int bit_nr)
@@ -1355,11 +1388,19 @@ void end_page_writeback(struct page *page)
 		rotate_reclaimable_page(page);
 	}
+	/*
+	 * Writeback does not hold a page reference of its own, relying
+	 * on truncation to wait for the clearing of PG_writeback.
+	 * But here we must make sure that the page is not freed and
+	 * reused before the wake_up_page().
+	 */
+	get_page(page);
 	if (!test_clear_page_writeback(page))
 		BUG();
 	smp_mb__after_atomic();
 	wake_up_page(page, PG_writeback);
+	put_page(page);
 }
 EXPORT_SYMBOL(end_page_writeback);

@@ -2746,12 +2746,6 @@ int test_clear_page_writeback(struct page *page)
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
-	/*
-	 * NOTE: Page might be free now! Writeback doesn't hold a page
-	 * reference on its own, it relies on truncation to wait for
-	 * the clearing of PG_writeback. The below can only access
-	 * page state that is static across allocation cycles.
-	 */
 	if (ret) {
 		dec_lruvec_state(lruvec, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
@@ -2817,7 +2811,7 @@ EXPORT_SYMBOL(__test_set_page_writeback);
 */
 void wait_on_page_writeback(struct page *page)
 {
-	if (PageWriteback(page)) {
+	while (PageWriteback(page)) {
 		trace_wait_on_page_writeback(page, page_mapping(page));
 		wait_on_page_bit(page, PG_writeback);
 	}

@@ -283,6 +283,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 	secpath_reset(skb);
+	if (skb_needs_linearize(skb, skb->dev->features) &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
 	return 0;
 }

@@ -314,6 +314,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
 	secpath_reset(skb);
+	if (skb_needs_linearize(skb, skb->dev->features) &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
 	return 0;
 }

@@ -1231,6 +1231,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	skb->transport_header = skb->network_header;
 	skb_set_inner_ipproto(skb, next_protocol);
+	skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
 	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
 		bool check = false;
@@ -1379,6 +1380,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	skb->transport_header = skb->network_header;
 	skb_set_inner_ipproto(skb, next_protocol);
+	skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
 	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
 		bool check = false;

@@ -4804,7 +4804,8 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
 	if (IS_ERR(set))
 		return PTR_ERR(set);
-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+	if (!list_empty(&set->bindings) &&
+	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
 		return -EBUSY;
 	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
@@ -4987,7 +4988,9 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
 	set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
 	if (IS_ERR(set))
 		return PTR_ERR(set);
-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+
+	if (!list_empty(&set->bindings) &&
+	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
 		return -EBUSY;
 	if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) {

@@ -440,3 +440,4 @@ module_init(nfnl_osf_init);
 module_exit(nfnl_osf_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);

@@ -71,4 +71,3 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Passive OS fingerprint matching.");
 MODULE_ALIAS("ipt_osf");
 MODULE_ALIAS("ip6t_osf");
-MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);

@@ -969,6 +969,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
 	if (ret < 0)
 		return ret;
+	sch_tree_lock(sch);
 	/* backup q->clg and q->loss_model */
 	old_clg = q->clg;
 	old_loss_model = q->loss_model;
@@ -977,7 +978,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
 		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
 		if (ret) {
 			q->loss_model = old_loss_model;
-			return ret;
+			goto unlock;
 		}
 	} else {
 		q->loss_model = CLG_RANDOM;
@@ -1044,6 +1045,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
 	/* capping jitter to the range acceptable by tabledist() */
 	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
+unlock:
+	sch_tree_unlock(sch);
 	return ret;
 get_table_failure:
@@ -1053,7 +1056,8 @@ get_table_failure:
 	 */
 	q->clg = old_clg;
 	q->loss_model = old_loss_model;
-	return ret;
+
+	goto unlock;
 }
 static int netem_init(struct Qdisc *sch, struct nlattr *opt,

@@ -1896,6 +1896,30 @@ static const struct dmi_system_id nau8824_quirk_table[] = {
 		},
 		.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
 	},
+	{
+		/* Positivo CW14Q01P */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+			DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P"),
+		},
+		.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+	},
+	{
+		/* Positivo K1424G */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+			DMI_MATCH(DMI_BOARD_NAME, "K1424G"),
+		},
+		.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+	},
+	{
+		/* Positivo N14ZP74G */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+			DMI_MATCH(DMI_BOARD_NAME, "N14ZP74G"),
+		},
+		.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+	},
 	{}
 };