Merge 5.4.258 into android11-5.4-lts

Changes in 5.4.258
	NFS/pNFS: Report EINVAL errors from connect() to the server
	SUNRPC: Mark the cred for revalidation if the server rejects it
	tracing: Increase trace array ref count on enable and filter files
	ata: libahci: clear pending interrupt status
	ext4: remove the 'group' parameter of ext4_trim_extent
	ext4: add new helper interface ext4_try_to_trim_range()
	ext4: scope ret locally in ext4_try_to_trim_range()
	ext4: change s_last_trim_minblks type to unsigned long
	ext4: mark group as trimmed only if it was fully scanned
	ext4: replace the traditional ternary conditional operator with max()/min()
	ext4: move setting of trimmed bit into ext4_try_to_trim_range()
	ext4: do not let fstrim block system suspend
	ASoC: meson: spdifin: start hw on dai probe
	netfilter: nf_tables: disallow element removal on anonymous sets
	bpf: Avoid deadlock when using queue and stack maps from NMI
	selftests/tls: Add {} to avoid static checker warning
	selftests: tls: swap the TX and RX sockets in some tests
	ASoC: imx-audmix: Fix return error with devm_clk_get()
	i40e: Fix for persistent lldp support
	i40e: Remove scheduling while atomic possibility
	i40e: Fix warning message and call stack during rmmod i40e driver
	i40e: Fix VF VLAN offloading when port VLAN is configured
	ipv4: fix null-deref in ipv4_link_failure
	powerpc/perf/hv-24x7: Update domain value check
	dccp: fix dccp_v4_err()/dccp_v6_err() again
	net: hns3: add 5ms delay before clear firmware reset irq source
	net: bridge: use DEV_STATS_INC()
	team: fix null-ptr-deref when team device type is changed
	net: rds: Fix possible NULL-pointer dereference
	netfilter: ipset: Fix race between IPSET_CMD_CREATE and IPSET_CMD_SWAP
	gpio: tb10x: Fix an error handling path in tb10x_gpio_probe()
	i2c: mux: demux-pinctrl: check the return value of devm_kstrdup()
	Input: i8042 - add quirk for TUXEDO Gemini 17 Gen1/Clevo PD70PN
	scsi: qla2xxx: Fix update_fcport for current_topology
	scsi: qla2xxx: Fix deletion race condition
	drm/amd/display: Reinstate LFC optimization
	drm/amd/display: Fix LFC multiplier changing erratically
	drm/amd/display: prevent potential division by zero errors
	ata: libata: disallow dev-initiated LPM transitions to unsupported states
	MIPS: Alchemy: only build mmc support helpers if au1xmmc is enabled
	clk: tegra: fix error return case for recalc_rate
	ARM: dts: ti: omap: motorola-mapphone: Fix abe_clkctrl warning on boot
	bus: ti-sysc: Fix SYSC_QUIRK_SWSUP_SIDLE_ACT handling for uart wake-up
	xtensa: add default definition for XCHAL_HAVE_DIV32
	xtensa: iss/network: make functions static
	xtensa: boot: don't add include-dirs
	xtensa: boot/lib: fix function prototypes
	gpio: pmic-eic-sprd: Add can_sleep flag for PMIC EIC chip
	parisc: sba: Fix compile warning wrt list of SBA devices
	parisc: iosapic.c: Fix sparse warnings
	parisc: drivers: Fix sparse warning
	parisc: irq: Make irq_stack_union static to avoid sparse warning
	selftests/ftrace: Correctly enable event in instance-event.tc
	ring-buffer: Avoid softlockup in ring_buffer_resize()
	ata: libata-eh: do not clear ATA_PFLAG_EH_PENDING in ata_eh_reset()
	spi: nxp-fspi: reset the FLSHxCR1 registers
	bpf: Clarify error expectations from bpf_clone_redirect
	powerpc/watchpoints: Annotate atomic context in more places
	ncsi: Propagate carrier gain/loss events to the NCSI controller
	fbdev/sh7760fb: Depend on FB=y
	nvme-pci: do not set the NUMA node of device if it has none
	watchdog: iTCO_wdt: No need to stop the timer in probe
	watchdog: iTCO_wdt: Set NO_REBOOT if the watchdog is not already running
	i40e: improve locking of mac_filter_hash
	i40e: always propagate error value in i40e_set_vsi_promisc()
	i40e: fix return of uninitialized aq_ret in i40e_set_vsi_promisc
	smack: Record transmuting in smk_transmuted
	smack: Retrieve transmuting information in smack_inode_getsecurity()
	Smack:- Use overlay inode label in smack_inode_copy_up()
	serial: 8250_port: Check IRQ data before use
	nilfs2: fix potential use after free in nilfs_gccache_submit_read_data()
	ALSA: hda: Disable power save for solving pop issue on Lenovo ThinkCentre M70q
	ata: libata-scsi: ignore reserved bits for REPORT SUPPORTED OPERATION CODES
	i2c: i801: unregister tco_pdev in i801_probe() error path
	ring-buffer: Update "shortest_full" in polling
	btrfs: properly report 0 avail for very full file systems
	net: thunderbolt: Fix TCPv6 GSO checksum calculation
	ata: libata-core: Fix ata_port_request_pm() locking
	ata: libata-core: Fix port and device removal
	ata: libata-core: Do not register PM operations for SAS ports
	ata: libata-sata: increase PMP SRST timeout to 10s
	fs: binfmt_elf_efpic: fix personality for ELF-FDPIC
	rbd: move rbd_dev_refresh() definition
	rbd: decouple header read-in from updating rbd_dev->header
	rbd: decouple parent info read-in from updating rbd_dev
	rbd: take header_rwsem in rbd_dev_refresh() only when updating
	Revert "PCI: qcom: Disable write access to read only registers for IP v2.3.3"
	scsi: zfcp: Fix a double put in zfcp_port_enqueue()
	qed/red_ll2: Fix undefined behavior bug in struct qed_ll2_info
	wifi: mwifiex: Fix tlv_buf_left calculation
	net: replace calls to sock->ops->connect() with kernel_connect()
	net: prevent rewrite of msg_name in sock_sendmsg()
	ubi: Refuse attaching if mtd's erasesize is 0
	wifi: iwlwifi: dbg_ini: fix structure packing
	wifi: mwifiex: Fix oob check condition in mwifiex_process_rx_packet
	drivers/net: process the result of hdlc_open() and add call of hdlc_close() in uhdlc_close()
	wifi: mt76: mt76x02: fix MT76x0 external LNA gain handling
	regmap: rbtree: Fix wrong register marked as in-cache when creating new node
	ima: Finish deprecation of IMA_TRUSTED_KEYRING Kconfig
	scsi: target: core: Fix deadlock due to recursive locking
	NFS4: Trace state recovery operation
	NFS: Add a helper nfs_client_for_each_server()
	NFSv4: Fix a nfs4_state_manager() race
	modpost: add missing else to the "of" check
	net: fix possible store tearing in neigh_periodic_work()
	ipv4, ipv6: Fix handling of transhdrlen in __ip{,6}_append_data()
	net: dsa: mv88e6xxx: Avoid EEPROM timeout when EEPROM is absent
	net: usb: smsc75xx: Fix uninit-value access in __smsc75xx_read_reg
	net: nfc: llcp: Add lock when modifying device list
	netfilter: handle the connecting collision properly in nf_conntrack_proto_sctp
	net: stmmac: dwmac-stm32: fix resume on STM32 MCU
	tcp: fix quick-ack counting to count actual ACKs of new data
	tcp: fix delayed ACKs for MSS boundary condition
	sctp: update transport state when processing a dupcook packet
	sctp: update hb timer immediately after users change hb_interval
	cpupower: add Makefile dependencies for install targets
	RDMA/core: Require admin capabilities to set system parameters
	IB/mlx4: Fix the size of a buffer in add_port_entries()
	gpio: aspeed: fix the GPIO number passed to pinctrl_gpio_set_config()
	gpio: pxa: disable pinctrl calls for MMP_GPIO
	RDMA/cma: Fix truncation compilation warning in make_cma_ports
	RDMA/uverbs: Fix typo of sizeof argument
	RDMA/siw: Fix connection failure handling
	RDMA/mlx5: Fix NULL string error
	parisc: Restore __ldcw_align for PA-RISC 2.0 processors
	NFS: Fix a race in __nfs_list_for_each_server()
	ima: rework CONFIG_IMA dependency block
	xen/events: replace evtchn_rwlock with RCU
	Linux 5.4.258

Change-Id: I5f0e742bb16c2e7edae606510d1fd037032cdec7
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit e18c011afe (Greg Kroah-Hartman, 2023-10-12 10:04:29 +00:00)
61 changed files with 677 additions and 410 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 257
SUBLEVEL = 258
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -2,14 +2,28 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
for the semaphore. */
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is implemented, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd).
Although the cache control hint is accepted by all PA 2.0 processors,
it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
require 16-byte alignment. If the address is unaligned, the operation
of the instruction is undefined. The ldcw instruction does not generate
unaligned data reference traps so misaligned accesses are not detected.
This hid the problem for years. So, restore the 16-byte alignment dropped
by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
#define __PA_LDCW_ALIGN_ORDER 4
@ -19,22 +33,12 @@
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
#define __LDCW "ldcw"
#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is specified, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd). */
#define __PA_LDCW_ALIGNMENT 4
#define __PA_LDCW_ALIGN_ORDER 2
#define __ldcw_align(a) (&(a)->slock)
#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
#endif /*!CONFIG_PA20*/
#else
#define __LDCW "ldcw"
#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload
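A minimal sketch of the alignment trick the comment above describes, assuming the pre-PA2.0 layout (an illustration, not the verbatim header): the lock type reserves an array of four ints, and the one 16-byte-aligned word inside it is selected at runtime.

	#define __PA_LDCW_ALIGNMENT	16

	typedef struct {
		volatile unsigned int lock[4];	/* 16 bytes, so one word is
						 * always 16-byte aligned */
	} arch_spinlock_t;

	#define __ldcw_align(a) ({					\
		unsigned long __ret = (unsigned long) &(a)->lock[0];	\
		__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
			& ~(__PA_LDCW_ALIGNMENT - 1);			\
		(volatile unsigned int *) __ret;			\
	})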


@ -3,13 +3,8 @@
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
} arch_spinlock_t;
typedef struct {


@ -5751,17 +5751,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
struct ata_link *link;
unsigned long flags;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
/* request PM ops to EH */
spin_lock_irqsave(ap->lock, flags);
/*
* A previous PM operation might still be in progress. Wait for
* ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
spin_lock_irqsave(ap->lock, flags);
}
/* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@ -5773,10 +5775,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
if (!async) {
if (!async)
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
}
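The fix above follows the general rule: the ATA_PFLAG_PM_PENDING test is only meaningful while ap->lock is held, and ata_port_wait_eh() may sleep, so the lock must be dropped around the wait and the flag re-checked afterwards. A hedged generic sketch of that pattern (using a loop for the fully general case):

	spin_lock_irqsave(ap->lock, flags);
	while (ap->pflags & ATA_PFLAG_PM_PENDING) {
		spin_unlock_irqrestore(ap->lock, flags);
		ata_port_wait_eh(ap);		/* may sleep */
		spin_lock_irqsave(ap->lock, flags);
	}
	/* flag observed clear with ap->lock held */
	spin_unlock_irqrestore(ap->lock, flags);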
/*
@ -5942,7 +5942,7 @@ void ata_host_resume(struct ata_host *host)
#endif
const struct device_type ata_port_type = {
.name = "ata_port",
.name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
@ -6745,11 +6745,30 @@ static void ata_port_detach(struct ata_port *ap)
if (!ap->ops->error_handler)
goto skip_eh;
/* tell EH we're leaving & flush EH */
/* Wait for any ongoing EH */
ata_port_wait_eh(ap);
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
/* Remove scsi devices */
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(dev, link, ALL) {
if (dev->sdev) {
spin_unlock_irqrestore(ap->lock, flags);
scsi_remove_device(dev->sdev);
spin_lock_irqsave(ap->lock, flags);
dev->sdev = NULL;
}
}
}
/* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);


@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
put_device(dev);
}
static const struct device_type ata_port_sas_type = {
.name = ATA_PORT_TYPE_NAME,
};
/** ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
dev->type = &ata_port_type;
if (ap->flags & ATA_FLAG_SAS_HOST)
dev->type = &ata_port_sas_type;
else
dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);


@ -30,6 +30,8 @@ enum {
ATA_DNXFER_QUIET = (1 << 31),
};
#define ATA_PORT_TYPE_NAME "ata_port"
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;


@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
reg - rbnode->base_reg, value);
(reg - rbnode->base_reg) / map->reg_stride,
value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
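A hedged worked example of the indexing fix above, assuming map->reg_stride == 4: a new node whose base_reg is 0x08 caches registers 0x08, 0x0c, 0x10, ... at consecutive indices, so register 0x10 belongs at index (0x10 - 0x08) / 4 == 2. The unscaled form computed 8 and marked the wrong register as cached.

	/* illustration only */
	unsigned int base_reg = 0x08, reg = 0x10, reg_stride = 4;
	unsigned int idx = (reg - base_reg) / reg_stride;	/* == 2 */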


@ -626,9 +626,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@ -1098,15 +1097,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
static void rbd_image_header_cleanup(struct rbd_image_header *header)
{
kfree(header->object_prefix);
ceph_put_snap_context(header->snapc);
kfree(header->snap_sizes);
kfree(header->snap_names);
memset(header, 0, sizeof(*header));
}
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
struct rbd_image_header_ondisk *ondisk)
static int rbd_header_from_disk(struct rbd_image_header *header,
struct rbd_image_header_ondisk *ondisk,
bool first_time)
{
struct rbd_image_header *header = &rbd_dev->header;
bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
@ -1173,11 +1181,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
rbd_init_layout(rbd_dev);
} else {
ceph_put_snap_context(header->snapc);
kfree(header->snap_names);
kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
@ -4983,7 +4986,9 @@ out_req:
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
struct rbd_image_header *header,
bool first_time)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@ -5031,7 +5036,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
ret = rbd_header_from_disk(rbd_dev, ondisk);
ret = rbd_header_from_disk(header, ondisk, first_time);
out:
kfree(ondisk);
@ -5075,43 +5080,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
u64 mapping_size;
int ret;
down_write(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
ret = rbd_dev_header_info(rbd_dev);
if (ret)
goto out;
/*
* If there is a parent, see if it has disappeared due to the
* mapped image getting flattened.
*/
if (rbd_dev->parent) {
ret = rbd_dev_v2_parent_info(rbd_dev);
if (ret)
goto out;
}
if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
rbd_dev->mapping.size = rbd_dev->header.image_size;
} else {
/* validate mapped snapshot's EXISTS flag */
rbd_exists_validate(rbd_dev);
}
out:
up_write(&rbd_dev->header_rwsem);
if (!ret && mapping_size != rbd_dev->mapping.size)
rbd_dev_update_size(rbd_dev);
return ret;
}
static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
@ -5681,17 +5649,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
&rbd_dev->header.obj_order,
&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
char **pobject_prefix)
{
size_t size;
void *reply_buf;
char *object_prefix;
int ret;
void *p;
@ -5709,16 +5672,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
goto out;
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
p + ret, NULL, GFP_NOIO);
object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
GFP_NOIO);
if (IS_ERR(object_prefix)) {
ret = PTR_ERR(object_prefix);
goto out;
}
ret = 0;
if (IS_ERR(rbd_dev->header.object_prefix)) {
ret = PTR_ERR(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
} else {
dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
}
*pobject_prefix = object_prefix;
dout(" object_prefix = %s\n", object_prefix);
out:
kfree(reply_buf);
@ -5763,12 +5726,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
&rbd_dev->header.features);
}
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
@ -5805,6 +5762,14 @@ struct parent_image_info {
u64 overlap;
};
static void rbd_parent_info_cleanup(struct parent_image_info *pii)
{
kfree(pii->pool_ns);
kfree(pii->image_id);
memset(pii, 0, sizeof(*pii));
}
/*
* The caller is responsible for @pii.
*/
@ -5874,6 +5839,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
pii->has_overlap, pii->overlap);
return 0;
e_inval:
@ -5912,14 +5880,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
pii->has_overlap, pii->overlap);
return 0;
e_inval:
return -EINVAL;
}
static int get_parent_info(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
@ -5947,7 +5918,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
return ret;
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
@ -5957,37 +5928,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (!parent_spec)
return -ENOMEM;
ret = get_parent_info(rbd_dev, &pii);
ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
pii.has_overlap, pii.overlap);
if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
/*
* Either the parent never existed, or we have
* record of it but the image got flattened so it no
* longer has a parent. When the parent of a
* layered image disappears we immediately set the
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
*
* If !pii.has_overlap, the parent image spec is not
* applicable. It's there to avoid duplication in each
* snapshot record.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
}
if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
goto out; /* No parent? No problem. */
}
/* The ceph file layout needs to fit pool id in 32 bits */
@ -5999,58 +5945,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
/*
* The parent won't change (except when the clone is
* flattened, already handled that). So we only need to
* record the parent spec we have not already done so.
* The parent won't change except when the clone is flattened,
* so we only need to record the parent image spec once.
*/
if (!rbd_dev->parent_spec) {
parent_spec->pool_id = pii.pool_id;
if (pii.pool_ns && *pii.pool_ns) {
parent_spec->pool_ns = pii.pool_ns;
pii.pool_ns = NULL;
}
parent_spec->image_id = pii.image_id;
pii.image_id = NULL;
parent_spec->snap_id = pii.snap_id;
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
parent_spec->pool_id = pii.pool_id;
if (pii.pool_ns && *pii.pool_ns) {
parent_spec->pool_ns = pii.pool_ns;
pii.pool_ns = NULL;
}
parent_spec->image_id = pii.image_id;
pii.image_id = NULL;
parent_spec->snap_id = pii.snap_id;
rbd_assert(!rbd_dev->parent_spec);
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
/*
* We always update the parent overlap. If it's zero we issue
* a warning, as we will proceed as if there was no parent.
* Record the parent overlap. If it's zero, issue a warning as
* we will proceed as if there is no parent.
*/
if (!pii.overlap) {
if (parent_spec) {
/* refresh, careful to warn just once */
if (rbd_dev->parent_overlap)
rbd_warn(rbd_dev,
"clone now standalone (overlap became 0)");
} else {
/* initial probe */
rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
}
}
if (!pii.overlap)
rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
kfree(pii.pool_ns);
kfree(pii.image_id);
rbd_parent_info_cleanup(&pii);
rbd_spec_put(parent_spec);
return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
u64 *stripe_unit, u64 *stripe_count)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
@ -6062,27 +5996,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
if (ret < size)
return -ERANGE;
p = &striping_info_buf;
rbd_dev->header.stripe_unit = ceph_decode_64(&p);
rbd_dev->header.stripe_count = ceph_decode_64(&p);
*stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
*stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
*stripe_count);
return 0;
}
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
{
__le64 data_pool_id;
__le64 data_pool_buf;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
NULL, 0, &data_pool_id, sizeof(data_pool_id));
NULL, 0, &data_pool_buf,
sizeof(data_pool_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < sizeof(data_pool_id))
if (ret < sizeof(data_pool_buf))
return -EBADMSG;
rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
*data_pool_id = le64_to_cpu(data_pool_buf);
dout(" data_pool_id = %lld\n", *data_pool_id);
WARN_ON(*data_pool_id == CEPH_NOPOOL);
return 0;
}
@ -6274,7 +6214,8 @@ out_err:
return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
struct ceph_snap_context **psnapc)
{
size_t size;
int ret;
@ -6335,9 +6276,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = snapc;
*psnapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
@ -6386,38 +6325,42 @@ out:
return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
struct rbd_image_header *header,
bool first_time)
{
bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
ret = rbd_dev_v2_image_size(rbd_dev);
ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
first_time ? &header->obj_order : NULL,
&header->image_size);
if (ret)
return ret;
if (first_time) {
ret = rbd_dev_v2_header_onetime(rbd_dev);
ret = rbd_dev_v2_header_onetime(rbd_dev, header);
if (ret)
return ret;
}
ret = rbd_dev_v2_snap_context(rbd_dev);
if (ret && first_time) {
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
}
ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
if (ret)
return ret;
return 0;
}
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
static int rbd_dev_header_info(struct rbd_device *rbd_dev,
struct rbd_image_header *header,
bool first_time)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
rbd_assert(!header->object_prefix && !header->snapc);
if (rbd_dev->image_format == 1)
return rbd_dev_v1_header_info(rbd_dev);
return rbd_dev_v1_header_info(rbd_dev, header, first_time);
return rbd_dev_v2_header_info(rbd_dev);
return rbd_dev_v2_header_info(rbd_dev, header, first_time);
}
/*
@ -6767,60 +6710,49 @@ out:
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
header = &rbd_dev->header;
ceph_put_snap_context(header->snapc);
kfree(header->snap_sizes);
kfree(header->snap_names);
kfree(header->object_prefix);
memset(header, 0, sizeof (*header));
rbd_image_header_cleanup(&rbd_dev->header);
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
struct rbd_image_header *header)
{
int ret;
ret = rbd_dev_v2_object_prefix(rbd_dev);
ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
if (ret)
goto out_err;
return ret;
/*
* Get and check the features for the image. Currently the
* features are assumed to never change.
*/
ret = rbd_dev_v2_features(rbd_dev);
ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
&header->features);
if (ret)
goto out_err;
return ret;
/* If the image supports fancy striping, get its parameters */
if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
ret = rbd_dev_v2_striping_info(rbd_dev);
if (ret < 0)
goto out_err;
}
if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
ret = rbd_dev_v2_data_pool(rbd_dev);
if (header->features & RBD_FEATURE_STRIPINGV2) {
ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
&header->stripe_count);
if (ret)
goto out_err;
return ret;
}
rbd_init_layout(rbd_dev);
return 0;
if (header->features & RBD_FEATURE_DATA_POOL) {
ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
if (ret)
return ret;
}
out_err:
rbd_dev->header.features = 0;
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
return ret;
return 0;
}
/*
@ -6998,10 +6930,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (!depth)
down_write(&rbd_dev->header_rwsem);
ret = rbd_dev_header_info(rbd_dev);
ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
if (ret)
goto err_out_probe;
rbd_init_layout(rbd_dev);
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
@ -7035,7 +6969,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
ret = rbd_dev_setup_parent(rbd_dev);
if (ret)
goto err_out_probe;
}
@ -7061,6 +6995,112 @@ err_out_format:
return ret;
}
static void rbd_dev_update_header(struct rbd_device *rbd_dev,
struct rbd_image_header *header)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
if (rbd_dev->header.image_size != header->image_size) {
rbd_dev->header.image_size = header->image_size;
if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
rbd_dev->mapping.size = header->image_size;
rbd_dev_update_size(rbd_dev);
}
}
if (rbd_dev->spec->snap_id != CEPH_NOSNAP) {
/* validate mapped snapshot's EXISTS flag */
rbd_exists_validate(rbd_dev);
}
ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = header->snapc;
header->snapc = NULL;
if (rbd_dev->image_format == 1) {
kfree(rbd_dev->header.snap_names);
rbd_dev->header.snap_names = header->snap_names;
header->snap_names = NULL;
kfree(rbd_dev->header.snap_sizes);
rbd_dev->header.snap_sizes = header->snap_sizes;
header->snap_sizes = NULL;
}
}
static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
{
if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
/*
* Either the parent never existed, or we have
* record of it but the image got flattened so it no
* longer has a parent. When the parent of a
* layered image disappears we immediately set the
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
*
* If !pii.has_overlap, the parent image spec is not
* applicable. It's there to avoid duplication in each
* snapshot record.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone has been flattened\n",
rbd_dev->disk->disk_name);
}
} else {
rbd_assert(rbd_dev->parent_spec);
/*
* Update the parent overlap. If it became zero, issue
* a warning as we will proceed as if there is no parent.
*/
if (!pii->overlap && rbd_dev->parent_overlap)
rbd_warn(rbd_dev,
"clone has become standalone (overlap 0)");
rbd_dev->parent_overlap = pii->overlap;
}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
struct rbd_image_header header = { 0 };
struct parent_image_info pii = { 0 };
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
ret = rbd_dev_header_info(rbd_dev, &header, false);
if (ret)
goto out;
/*
* If there is a parent, see if it has disappeared due to the
* mapped image getting flattened.
*/
if (rbd_dev->parent) {
ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out;
}
down_write(&rbd_dev->header_rwsem);
rbd_dev_update_header(rbd_dev, &header);
if (rbd_dev->parent)
rbd_dev_update_parent(rbd_dev, &pii);
up_write(&rbd_dev->header_rwsem);
out:
rbd_parent_info_cleanup(&pii);
rbd_image_header_cleanup(&header);
return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
const char *buf,
size_t count)


@ -967,7 +967,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
return pinctrl_gpio_set_config(offset, config);
return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */


@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
case MMP_GPIO:
return false;
default:


@ -218,7 +218,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
}
for (i = 0; i < ports_num; i++) {
char port_str[10];
char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);


@ -2080,6 +2080,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit,


@ -633,7 +633,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count)
return -EINVAL;
if (count < method_elm->req_size + sizeof(hdr)) {
if (count < method_elm->req_size + sizeof(*hdr)) {
/*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't


@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
char buff[11];
char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;


@ -2053,7 +2053,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
return NULL;
return "Unknown";
}
}


@ -981,6 +981,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv) {
siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep);
goto error;
}
@ -1105,9 +1106,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* Socket close before MPA request received.
*/
siw_dbg_cep(cep, "no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
if (cep->listen_cep) {
siw_dbg_cep(cep,
"no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
}
}
}
release_cep = 1;
@ -1230,7 +1234,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep)
goto out;
siw_dbg_cep(cep, "state: %d\n", cep->state);
siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
cep->state, sk->sk_state);
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:


@ -865,6 +865,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
/* UBI cannot work on flashes with zero erasesize. */
if (!mtd->erasesize) {
pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
mtd->index);
return -EINVAL;
}
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)


@ -2149,14 +2149,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
*/
mv88e6xxx_g1_wait_eeprom_done(chip);
if (chip->info->ops->get_eeprom)
mv88e6xxx_g2_eeprom_wait(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
mv88e6xxx_g1_wait_eeprom_done(chip);
if (chip->info->ops->get_eeprom)
mv88e6xxx_g2_eeprom_wait(chip);
}
}


@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
{
const unsigned long timeout = jiffies + 1 * HZ;
u16 val;
int err;
/* Wait up to 1 second for the switch to finish reading the
* EEPROM.
*/
while (time_before(jiffies, timeout)) {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
if (err) {
dev_err(chip->dev, "Error reading status");
return;
}
/* If the switch is still resetting, it may not
* respond on the bus, and so MDIO read returns
* 0xffff. Differentiate between that, and waiting for
* the EEPROM to be done by bit 0 being set.
*/
if (val != 0xffff &&
val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
return;
usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout waiting for EEPROM done");
}
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5


@ -277,7 +277,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);


@ -310,7 +310,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;


@ -340,6 +340,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;


@ -123,9 +123,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
struct qed_ll2_cbs cbs;
};
/**


@ -111,6 +111,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@ -128,7 +129,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
if (!dwmac->dev->power.is_suspended) {
if (!dwmac->ops->clk_rx_enable_in_suspend ||
!dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@ -508,7 +510,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {


@ -958,12 +958,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
} else if (skb_is_gso_v6(skb)) {
} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
return false;
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,


@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
if (unlikely(ret < 0)) {
if (unlikely(ret < 4)) {
ret = ret < 0 ? ret : -ENODATA;
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;


@ -34,6 +34,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
static int uhdlc_close(struct net_device *dev);
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@ -710,6 +712,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@ -733,10 +736,13 @@ static int uhdlc_open(struct net_device *dev)
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
hdlc_open(dev);
rc = hdlc_open(dev);
if (rc)
uhdlc_close(dev);
}
return 0;
return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@ -826,6 +832,8 @@ static int uhdlc_close(struct net_device *dev)
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
hdlc_close(dev);
return 0;
}


@ -332,9 +332,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
__le32 internal_base_addr;
__le64 dram_base_addr;
__le32 page_num;
__le32 internal_base_addr __packed;
__le64 dram_base_addr __packed;
__le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
};
__le32 data[];


@ -977,8 +977,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
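The corrected walk above matches the usual TLV convention: the length field counts only the payload after the header, so both the advance and the remaining-length bookkeeping must add the header size. A hedged generic sketch (names are illustrative, not the mwifiex structures):

	while (tlv_buf_left >= sizeof(tlv->header)) {
		u16 tlv_len = le16_to_cpu(tlv->header.len);

		if (tlv_buf_left < sizeof(tlv->header) + tlv_len)
			break;				/* truncated TLV */
		/* ... process this TLV ... */
		tlv_buf_left -= sizeof(tlv->header) + tlv_len;
		tlv = (void *)((u8 *)tlv + sizeof(tlv->header) + tlv_len);
	}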


@ -98,7 +98,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@ -107,12 +108,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type


@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
u16 val;
u8 lna;
val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
*lna_2g = 0;
if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
memset(lna_5g, 0, sizeof(s8) * 3);
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)


@ -251,7 +251,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
u8 lna;
bool use_lna;
u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@ -270,7 +271,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (chan->band == NL80211_BAND_2GHZ)
use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
else
use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
if (use_lna)
lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);


@ -818,8 +818,6 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
return PTR_ERR(res->phy_ahb_reset);
}
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}


@ -488,12 +488,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (port) {
put_device(&port->dev);
retval = -EEXIST;
goto err_out;
goto err_put;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
goto err_out;
goto err_put;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
@ -516,7 +516,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
goto err_put;
}
retval = -EINVAL;
@ -533,7 +533,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
err_out:
err_put:
zfcp_ccw_adapter_put(adapter);
err_out:
return ERR_PTR(retval);
}
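The relabelled exits above follow the standard unwind-ladder idiom: each label undoes exactly the resources still held by the paths that jump to it. A hedged sketch of the resulting structure:

	err_put:
		zfcp_ccw_adapter_put(adapter);	/* paths still holding the ref */
	err_out:
		return ERR_PTR(retval);		/* paths where the ref was already
						 * dropped, avoiding the double put */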


@ -856,7 +856,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@ -866,11 +865,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
struct config_item *item;
int ret;
config_item_put(iter->prev_item);
iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@ -880,12 +877,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!iter->prev_item)
item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@ -908,7 +906,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
config_item_put(iter.prev_item);
return ret;
}


@ -83,23 +83,13 @@ const struct evtchn_ops *evtchn_ops;
*/
static DEFINE_MUTEX(irq_mapping_update_lock);
/*
* Lock protecting event handling loop against removing event channels.
* Adding of event channels is no issue as the associated IRQ becomes active
* only after everything is setup (before request_[threaded_]irq() the handler
* can't be entered for an event, as the event channel will be unmasked only
* then).
*/
static DEFINE_RWLOCK(evtchn_rwlock);
/*
* Lock hierarchy:
*
* irq_mapping_update_lock
* evtchn_rwlock
* IRQ-desc lock
* percpu eoi_list_lock
* irq_info->lock
* IRQ-desc lock
* percpu eoi_list_lock
* irq_info->lock
*/
static LIST_HEAD(xen_irq_list_head);
@ -214,6 +204,22 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
static void delayed_free_irq(struct work_struct *work)
{
struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
rwork);
unsigned int irq = info->irq;
/* Remove the info pointer only now, with no potential users left. */
set_info_for_irq(irq, NULL);
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
if (irq >= nr_legacy_irqs())
irq_free_desc(irq);
}
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@ -548,33 +554,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
read_lock_irqsave(&evtchn_rwlock, flags);
rcu_read_lock();
while (true) {
spin_lock(&eoi->eoi_list_lock);
spin_lock_irqsave(&eoi->eoi_list_lock, flags);
info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
eoi_list);
if (info == NULL || now < info->eoi_time) {
spin_unlock(&eoi->eoi_list_lock);
if (info == NULL)
break;
if (now < info->eoi_time) {
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed,
info->eoi_time - now);
break;
}
list_del_init(&info->eoi_list);
spin_unlock(&eoi->eoi_list_lock);
spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
info->eoi_time = 0;
xen_irq_lateeoi_locked(info, false);
}
if (info)
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed, info->eoi_time - now);
spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
read_unlock_irqrestore(&evtchn_rwlock, flags);
rcu_read_unlock();
}
static void xen_cpu_init_eoi(unsigned int cpu)
@ -589,16 +598,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
struct irq_info *info;
unsigned long flags;
read_lock_irqsave(&evtchn_rwlock, flags);
rcu_read_lock();
info = info_for_irq(irq);
if (info)
xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
read_unlock_irqrestore(&evtchn_rwlock, flags);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
@ -617,6 +625,7 @@ static void xen_irq_init(unsigned irq)
info->type = IRQT_UNBOUND;
info->refcnt = -1;
INIT_RCU_WORK(&info->rwork, delayed_free_irq);
set_info_for_irq(irq, info);
@ -669,31 +678,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
unsigned long flags;
if (WARN_ON(!info))
return;
write_lock_irqsave(&evtchn_rwlock, flags);
if (!list_empty(&info->eoi_list))
lateeoi_list_del(info);
list_del(&info->list);
set_info_for_irq(irq, NULL);
WARN_ON(info->refcnt > 0);
write_unlock_irqrestore(&evtchn_rwlock, flags);
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
if (irq < nr_legacy_irqs())
return;
irq_free_desc(irq);
queue_rcu_work(system_wq, &info->rwork);
}
static void xen_evtchn_close(unsigned int port)
@ -1604,7 +1600,14 @@ static void __xen_evtchn_do_upcall(void)
unsigned count;
struct evtchn_loop_ctrl ctrl = { 0 };
read_lock(&evtchn_rwlock);
/*
* When closing an event channel the associated IRQ must not be freed
* until all cpus have left the event handling loop. This is ensured
* by taking the rcu_read_lock() while handling events, as freeing of
* the IRQ is handled via queue_rcu_work() _after_ closing the event
* channel.
*/
rcu_read_lock();
do {
vcpu_info->evtchn_upcall_pending = 0;
@ -1621,7 +1624,7 @@ static void __xen_evtchn_do_upcall(void)
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
read_unlock(&evtchn_rwlock);
rcu_read_unlock();
/*
* Increment irq_epoch only now to defer EOIs only for


@ -6,6 +6,7 @@
*/
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
#include <linux/rcupdate.h>
/* Interrupt types. */
enum xen_irq_type {
@ -31,6 +32,7 @@ enum xen_irq_type {
struct irq_info {
struct list_head list;
struct list_head eoi_list;
struct rcu_work rwork;
short refcnt;
short spurious_cnt;
short type; /* type */


@ -345,10 +345,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
/* there's now no turning back... the old userspace image is dead,
* defunct, deceased, etc.
*/
SET_PERSONALITY(exec_params.hdr);
if (elf_check_fdpic(&exec_params.hdr))
set_personality(PER_LINUX_FDPIC);
else
set_personality(PER_LINUX);
current->personality |= PER_LINUX_FDPIC;
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;


@ -2135,7 +2135,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* calculated f_bavail.
*/
if (!mixed && block_rsv->space_info->full &&
total_free_meta - thresh < block_rsv->size)
(total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;


@ -411,7 +411,9 @@ extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
extern int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *, void *),
void *data);
/* io.c */
extern void nfs_start_io_read(struct inode *inode);
extern void nfs_end_io_read(struct inode *inode);

View File

@ -61,6 +61,7 @@
#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_STATE
@ -2525,6 +2526,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Ensure exclusive access to NFSv4 state */
do {
trace_nfs4_state_mgr(clp);
clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
section = "purge state";
@ -2621,6 +2623,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
!test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
&clp->cl_state)) {
memflags = memalloc_nofs_save();
continue;
}
if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
@ -2641,6 +2650,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
out_error:
if (strlen(section))
section_sep = ": ";
trace_nfs4_state_mgr_failed(clp, section, status);
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);


@ -563,6 +563,99 @@ TRACE_EVENT(nfs4_setup_sequence,
)
);
TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_RUNNING);
TRACE_DEFINE_ENUM(NFS4CLNT_CHECK_LEASE);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_EXPIRED);
TRACE_DEFINE_ENUM(NFS4CLNT_RECLAIM_REBOOT);
TRACE_DEFINE_ENUM(NFS4CLNT_RECLAIM_NOGRACE);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN);
TRACE_DEFINE_ENUM(NFS4CLNT_SESSION_RESET);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_CONFIRM);
TRACE_DEFINE_ENUM(NFS4CLNT_SERVER_SCOPE_MISMATCH);
TRACE_DEFINE_ENUM(NFS4CLNT_PURGE_STATE);
TRACE_DEFINE_ENUM(NFS4CLNT_BIND_CONN_TO_SESSION);
TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_RUNNING);
#define show_nfs4_clp_state(state) \
__print_flags(state, "|", \
{ NFS4CLNT_MANAGER_RUNNING, "MANAGER_RUNNING" }, \
{ NFS4CLNT_CHECK_LEASE, "CHECK_LEASE" }, \
{ NFS4CLNT_LEASE_EXPIRED, "LEASE_EXPIRED" }, \
{ NFS4CLNT_RECLAIM_REBOOT, "RECLAIM_REBOOT" }, \
{ NFS4CLNT_RECLAIM_NOGRACE, "RECLAIM_NOGRACE" }, \
{ NFS4CLNT_DELEGRETURN, "DELEGRETURN" }, \
{ NFS4CLNT_SESSION_RESET, "SESSION_RESET" }, \
{ NFS4CLNT_LEASE_CONFIRM, "LEASE_CONFIRM" }, \
{ NFS4CLNT_SERVER_SCOPE_MISMATCH, \
"SERVER_SCOPE_MISMATCH" }, \
{ NFS4CLNT_PURGE_STATE, "PURGE_STATE" }, \
{ NFS4CLNT_BIND_CONN_TO_SESSION, \
"BIND_CONN_TO_SESSION" }, \
{ NFS4CLNT_MOVED, "MOVED" }, \
{ NFS4CLNT_LEASE_MOVED, "LEASE_MOVED" }, \
{ NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \
{ NFS4CLNT_RUN_MANAGER, "RUN_MANAGER" }, \
{ NFS4CLNT_DELEGRETURN_RUNNING, "DELEGRETURN_RUNNING" })
TRACE_EVENT(nfs4_state_mgr,
TP_PROTO(
const struct nfs_client *clp
),
TP_ARGS(clp),
TP_STRUCT__entry(
__field(unsigned long, state)
__string(hostname, clp->cl_hostname)
),
TP_fast_assign(
__entry->state = clp->cl_state;
__assign_str(hostname, clp->cl_hostname)
),
TP_printk(
"hostname=%s clp state=%s", __get_str(hostname),
show_nfs4_clp_state(__entry->state)
)
)
TRACE_EVENT(nfs4_state_mgr_failed,
TP_PROTO(
const struct nfs_client *clp,
const char *section,
int status
),
TP_ARGS(clp, section, status),
TP_STRUCT__entry(
__field(unsigned long, error)
__field(unsigned long, state)
__string(hostname, clp->cl_hostname)
__string(section, section)
),
TP_fast_assign(
__entry->error = status;
__entry->state = clp->cl_state;
__assign_str(hostname, clp->cl_hostname);
__assign_str(section, section);
),
TP_printk(
"hostname=%s clp state=%s error=%ld (%s) section=%s",
__get_str(hostname),
show_nfs4_clp_state(__entry->state), -__entry->error,
show_nfsv4_errors(__entry->error), __get_str(section)
)
)
TRACE_EVENT(nfs4_xdr_status,
TP_PROTO(
const struct xdr_stream *xdr,


@ -436,6 +436,41 @@ void nfs_sb_deactive(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(nfs_sb_deactive);
static int __nfs_list_for_each_server(struct list_head *head,
int (*fn)(struct nfs_server *, void *),
void *data)
{
struct nfs_server *server, *last = NULL;
int ret = 0;
rcu_read_lock();
list_for_each_entry_rcu(server, head, client_link) {
if (!(server->super && nfs_sb_active(server->super)))
continue;
rcu_read_unlock();
if (last)
nfs_sb_deactive(last->super);
last = server;
ret = fn(server, data);
if (ret)
goto out;
rcu_read_lock();
}
rcu_read_unlock();
out:
if (last)
nfs_sb_deactive(last->super);
return ret;
}
int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *, void *),
void *data)
{
return __nfs_list_for_each_server(&clp->cl_superblocks, fn, data);
}
EXPORT_SYMBOL_GPL(nfs_client_for_each_server);
/*
* Deliver file system statistics to userspace
*/

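The new __nfs_list_for_each_server() is a reusable shape: walk an RCU-protected list while invoking a callback that may sleep. The current entry is pinned (via nfs_sb_active()) before the RCU read lock is dropped, and the previous entry is released one step behind, so the traversal can resume from a node that is guaranteed to still be linked. A minimal sketch of the same idea, assuming a hypothetical refcounted struct item (item_tryget()/item_put() are invented names; the RCU and list primitives are the real kernel APIs):

struct item {
	struct list_head link;
	/* hypothetical refcount lives here */
};

static int for_each_item_sleepable(struct list_head *head,
				   int (*fn)(struct item *, void *),
				   void *data)
{
	struct item *it, *last = NULL;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(it, head, link) {
		if (!item_tryget(it))	/* skip entries already going away */
			continue;
		rcu_read_unlock();	/* fn() may sleep, so leave RCU */
		if (last)
			item_put(last);	/* release the previous entry */
		last = it;
		ret = fn(it, data);
		if (ret)
			goto out;
		rcu_read_lock();	/* resume the walk from 'it' */
	}
	rcu_read_unlock();
out:
	if (last)
		item_put(last);
	return ret;
}

The subtle part is resuming list_for_each_entry_rcu() after re-taking the read lock: it is only legal because the pinned entry cannot be freed, so its forward pointer stays valid.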
@@ -298,7 +298,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
ATA_TMOUT_PMP_SRST_WAIT = 5000,
ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that

@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};

@@ -343,12 +343,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
/* How many ACKs S/ACKing new data have we sent? */
const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */

@@ -933,7 +933,9 @@ static void neigh_periodic_work(struct work_struct *work)
(state == NUD_FAILED ||
!time_in_range_open(jiffies, n->used,
n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
neigh_mark_dead(n);
write_unlock(&n->lock);
neigh_cleanup_and_release(n);

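The replacement above is the canonical RCU list unlink: while holding the writer-side lock, read the successor with rcu_dereference_protected() (which documents via lockdep that tbl->lock is held) and publish it with rcu_assign_pointer(), so lockless readers never see a half-updated pointer the way they could with the old plain store. Reduced to a sketch over a hypothetical singly linked struct node list guarded by a spinlock (the RCU primitives are the real APIs):

struct node {
	struct node __rcu *next;
};

/* Caller holds 'lock', the writer-side lock for this list. */
static void unlink_node(struct node __rcu **np, struct node *n,
			spinlock_t *lock)
{
	rcu_assign_pointer(*np,
			   rcu_dereference_protected(n->next,
						     lockdep_is_held(lock)));
	/* 'n' may only be freed after a grace period, e.g. via kfree_rcu(). */
}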
@@ -179,6 +179,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (unlikely(len > icsk->icsk_ack.rcv_mss +
MAX_TCP_OPTION_SPACE))
tcp_gro_dev_warn(sk, skb, len);
/* If the skb has a len of exactly 1*MSS and has the PSH bit
* set then it is likely the end of an application write. So
* more data may not be arriving soon, and yet the data sender
* may be waiting for an ACK if cwnd-bound or using TX zero
* copy. So we set ICSK_ACK_PUSHED here so that
* tcp_cleanup_rbuf() will send an ACK immediately if the app
* reads all of the data and is not ping-pong. If len > MSS
* then this logic does not matter (and does not hurt) because
* tcp_cleanup_rbuf() will always ACK immediately if the app
* reads data and there is more than an MSS of unACKed data.
*/
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.

@@ -179,8 +179,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}
/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
u32 rcv_nxt)
static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -194,7 +193,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
if (unlikely(rcv_nxt != tp->rcv_nxt))
return; /* Special ACK sent by DCTCP to reflect ECN */
tcp_dec_quickack_mode(sk, pkts);
tcp_dec_quickack_mode(sk);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -1152,7 +1151,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
tcp_event_ack_sent(sk, rcv_nxt);
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);

@@ -521,7 +521,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
*/
if (len > INT_MAX - transhdrlen)
return -EMSGSIZE;
ulen = len + transhdrlen;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
@@ -645,6 +644,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
back_from_confirm:
lock_sock(sk);
ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,

@@ -1510,8 +1510,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id,
}
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
salen, 0);
result = kernel_connect(sock, (struct sockaddr *)&mcast_addr,
salen, 0);
if (result < 0) {
pr_err("Error connecting to the multicast addr\n");
goto error;

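This hunk, like the rds_tcp one further down, swaps a raw sock->ops->connect() call for kernel_connect(). Beyond being the sanctioned interface for kernel-owned sockets, the point in this series is that kernel_connect() now works on a local copy of the address, so BPF hooks that rewrite connect() destinations cannot scribble on the caller's sockaddr. A minimal sketch of the pattern for a hypothetical in-kernel UDP client (function name and address setup invented; sock_create_kern(), kernel_connect() and sock_release() are the real APIs):

static int udp_client_connect(struct net *net, __be32 daddr, __be16 dport,
			      struct socket **res)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port = dport,
		.sin_addr.s_addr = daddr,
	};
	struct socket *sock;
	int err;

	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0)
		return err;

	/* kernel_connect() rather than sock->ops->connect(): the helper
	 * hands the protocol a copy of the address, not the caller's. */
	err = kernel_connect(sock, (struct sockaddr *)&sin, sizeof(sin), 0);
	if (err < 0) {
		sock_release(sock);
		return err;
	}

	*res = sock;
	return 0;
}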
@@ -112,7 +112,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
/* cookie_ack */ {sCL, sCL, sCW, sES, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
@@ -126,7 +126,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
/* cookie_echo */ {sIV, sCL, sCE, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
@@ -426,6 +426,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
/* (D) vtag must be same as init_vtag as found in INIT_ACK */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
} else if (sch->type == SCTP_CID_COOKIE_ACK) {
ct->proto.sctp.init[dir] = 0;
ct->proto.sctp.init[!dir] = 0;
} else if (sch->type == SCTP_CID_HEARTBEAT) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
@@ -474,16 +477,18 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
}
/* If it is an INIT or an INIT ACK note down the vtag */
if (sch->type == SCTP_CID_INIT ||
sch->type == SCTP_CID_INIT_ACK) {
struct sctp_inithdr _inithdr, *ih;
if (sch->type == SCTP_CID_INIT) {
struct sctp_inithdr _ih, *ih;
ih = skb_header_pointer(skb, offset + sizeof(_sch),
sizeof(_inithdr), &_inithdr);
if (ih == NULL)
ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
if (!ih)
goto out_unlock;
pr_debug("Setting vtag %x for dir %d\n",
ih->init_tag, !dir);
if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir])
ct->proto.sctp.init[!dir] = 0;
ct->proto.sctp.init[dir] = 1;
pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
/* don't renew timeout on init retransmit so
@@ -494,6 +499,24 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
old_state == SCTP_CONNTRACK_CLOSED &&
nf_ct_is_confirmed(ct))
ignore = true;
} else if (sch->type == SCTP_CID_INIT_ACK) {
struct sctp_inithdr _ih, *ih;
__be32 vtag;
ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
if (!ih)
goto out_unlock;
vtag = ct->proto.sctp.vtag[!dir];
if (!ct->proto.sctp.init[!dir] && vtag && vtag != ih->init_tag)
goto out_unlock;
/* collision */
if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir] &&
vtag != ih->init_tag)
goto out_unlock;
pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
}
ct->proto.sctp.state = new_state;

@@ -1646,7 +1646,9 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0);
INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
spin_lock(&llcp_devices_lock);
list_add(&local->list, &llcp_devices);
spin_unlock(&llcp_devices_lock);
return 0;
}

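The fix above follows the basic rule that a shared list may be mutated only under the lock its other users take; the unlocked list_add() could race with concurrent walkers of llcp_devices. As a pattern it reduces to the following sketch (struct my_dev and the surrounding names are invented; the locking and list macros are real):

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

struct my_dev {
	struct list_head list;
};

static void my_dev_register(struct my_dev *dev)
{
	spin_lock(&devices_lock);
	list_add(&dev->list, &devices);	/* mutation under the lock */
	spin_unlock(&devices_lock);
}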
@@ -169,7 +169,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
* own the socket
*/
rds_tcp_set_callbacks(sock, cp);
ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)

@@ -1156,8 +1156,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports)
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
!sctp_assoc_add_peer(asoc, &trans->ipaddr,
if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state))
return -ENOMEM;

@@ -2491,6 +2491,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
sctp_transport_reset_hb_timer(trans);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);

@@ -642,6 +642,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
return ret;
}
static int __sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
int err = security_socket_sendmsg(sock, msg,
msg_data_left(msg));
return err ?: sock_sendmsg_nosec(sock, msg);
}
/**
* sock_sendmsg - send a message through @sock
* @sock: socket
@@ -652,10 +660,19 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
*/
int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
int err = security_socket_sendmsg(sock, msg,
msg_data_left(msg));
struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
struct sockaddr_storage address;
int ret;
return err ?: sock_sendmsg_nosec(sock, msg);
if (msg->msg_name) {
memcpy(&address, msg->msg_name, msg->msg_namelen);
msg->msg_name = &address;
}
ret = __sock_sendmsg(sock, msg);
msg->msg_name = save_addr;
return ret;
}
EXPORT_SYMBOL(sock_sendmsg);
@@ -987,7 +1004,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (sock->type == SOCK_SEQPACKET)
msg.msg_flags |= MSG_EOR;
res = sock_sendmsg(sock, &msg);
res = __sock_sendmsg(sock, &msg);
*from = msg.msg_iter;
return res;
}
@@ -1939,7 +1956,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
err = sock_sendmsg(sock, &msg);
err = __sock_sendmsg(sock, &msg);
out_put:
fput_light(sock->file, fput_needed);
@@ -2284,7 +2301,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
err = sock_sendmsg_nosec(sock, msg_sys);
goto out_freectl;
}
err = sock_sendmsg(sock, msg_sys);
err = __sock_sendmsg(sock, msg_sys);
/*
* If this is sendmmsg() and sending to current destination address was
* successful, remember it.

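Taken together, these socket.c hunks split the old sock_sendmsg() into __sock_sendmsg() (security hook plus send) and a wrapper that bounces msg_name through an on-stack sockaddr_storage, restoring the caller's pointer afterwards, so hooks that rewrite the destination never touch a buffer the kernel caller owns. Internal paths that already hold a private copy of the address (sock_write_iter(), __sys_sendto(), ____sys_sendmsg()) call __sock_sendmsg() directly and skip the redundant copy. The caller-visible effect, sketched for a hypothetical kernel user that reuses one destination across sends (send_all() and its arguments are invented; kernel_sendmsg() is the real helper and ends in sock_sendmsg()):

/* With the bounce buffer in sock_sendmsg(), a hook that rewrites the
 * destination only ever modifies the kernel's private copy, so 'dst'
 * survives intact for the next call. */
static int send_all(struct socket *sock, struct sockaddr_in *dst,
		    struct kvec *vec, size_t num, size_t len)
{
	struct msghdr msg = {
		.msg_name = dst,
		.msg_namelen = sizeof(*dst),
	};

	return kernel_sendmsg(sock, &msg, vec, num, len);
}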
@@ -1455,7 +1455,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
/* First handle the "special" cases */
if (sym_is(name, namelen, "usb"))
do_usb_table(symval, sym->st_size, mod);
if (sym_is(name, namelen, "of"))
else if (sym_is(name, namelen, "of"))
do_of_table(symval, sym->st_size, mod);
else if (sym_is(name, namelen, "pnp"))
do_pnp_device_entry(symval, sym->st_size, mod);

@@ -29,9 +29,11 @@ config IMA
to learn more about IMA.
If unsure, say N.
if IMA
config IMA_KEXEC
bool "Enable carrying the IMA measurement list across a soft boot"
depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
depends on TCG_TPM && HAVE_IMA_KEXEC
default n
help
TPM PCRs are only reset on a hard reboot. In order to validate
@@ -43,7 +45,6 @@ config IMA_KEXEC
config IMA_MEASURE_PCR_IDX
int
depends on IMA
range 8 14
default 10
help
@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX
config IMA_LSM_RULES
bool
depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
default y
help
Disabling this option will disregard LSM based policy rules.
@@ -61,7 +62,6 @@ config IMA_LSM_RULES
choice
prompt "Default template"
default IMA_NG_TEMPLATE
depends on IMA
help
Select the default IMA measurement template.
@@ -80,14 +80,12 @@ endchoice
config IMA_DEFAULT_TEMPLATE
string
depends on IMA
default "ima-ng" if IMA_NG_TEMPLATE
default "ima-sig" if IMA_SIG_TEMPLATE
choice
prompt "Default integrity hash algorithm"
default IMA_DEFAULT_HASH_SHA1
depends on IMA
help
Select the default hash algorithm used for the measurement
list, integrity appraisal and audit log. The compiled default
@@ -113,7 +111,6 @@ endchoice
config IMA_DEFAULT_HASH
string
depends on IMA
default "sha1" if IMA_DEFAULT_HASH_SHA1
default "sha256" if IMA_DEFAULT_HASH_SHA256
default "sha512" if IMA_DEFAULT_HASH_SHA512
@@ -121,7 +118,6 @@ config IMA_DEFAULT_HASH
config IMA_WRITE_POLICY
bool "Enable multiple writes to the IMA policy"
depends on IMA
default n
help
IMA policy can now be updated multiple times. The new rules get
@@ -132,7 +128,6 @@ config IMA_WRITE_POLICY
config IMA_READ_POLICY
bool "Enable reading back the current IMA policy"
depends on IMA
default y if IMA_WRITE_POLICY
default n if !IMA_WRITE_POLICY
help
@@ -142,7 +137,6 @@ config IMA_READ_POLICY
config IMA_APPRAISE
bool "Appraise integrity measurements"
depends on IMA
default n
help
This option enables local measurement integrity appraisal.
@@ -263,7 +257,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
config IMA_BLACKLIST_KEYRING
bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
depends on SYSTEM_TRUSTED_KEYRING
depends on IMA_TRUSTED_KEYRING
depends on INTEGRITY_TRUSTED_KEYRING
default n
help
This option creates an IMA blacklist keyring, which contains all
@@ -273,7 +267,7 @@ config IMA_BLACKLIST_KEYRING
config IMA_LOAD_X509
bool "Load X509 certificate onto the '.ima' trusted keyring"
depends on IMA_TRUSTED_KEYRING
depends on INTEGRITY_TRUSTED_KEYRING
default n
help
File signature verification is based on the public keys
@@ -295,3 +289,5 @@ config IMA_APPRAISE_SIGNED_INIT
default n
help
This option requires user-space init to be signed.
endif

@@ -270,14 +270,14 @@ clean:
$(MAKE) -C bench O=$(OUTPUT) clean
install-lib:
install-lib: libcpupower
$(INSTALL) -d $(DESTDIR)${libdir}
$(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
$(INSTALL) -d $(DESTDIR)${includedir}
$(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
$(INSTALL_DATA) lib/cpuidle.h $(DESTDIR)${includedir}/cpuidle.h
install-tools:
install-tools: $(OUTPUT)cpupower
$(INSTALL) -d $(DESTDIR)${bindir}
$(INSTALL_PROGRAM) $(OUTPUT)cpupower $(DESTDIR)${bindir}
$(INSTALL) -d $(DESTDIR)${bash_completion_dir}
@@ -293,14 +293,14 @@ install-man:
$(INSTALL_DATA) -D man/cpupower-info.1 $(DESTDIR)${mandir}/man1/cpupower-info.1
$(INSTALL_DATA) -D man/cpupower-monitor.1 $(DESTDIR)${mandir}/man1/cpupower-monitor.1
install-gmo:
install-gmo: create-gmo
$(INSTALL) -d $(DESTDIR)${localedir}
for HLANG in $(LANGUAGES); do \
echo '$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \
$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
done;
install-bench:
install-bench: compile-bench
@#DESTDIR must be set from outside to survive
@sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install

@@ -27,7 +27,7 @@ $(OUTPUT)cpufreq-bench: $(OBJS)
all: $(OUTPUT)cpufreq-bench
install:
install: $(OUTPUT)cpufreq-bench
mkdir -p $(DESTDIR)/$(sbindir)
mkdir -p $(DESTDIR)/$(bindir)
mkdir -p $(DESTDIR)/$(docdir)