This is the 5.4.201 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmK25xoACgkQONu9yGCS
 aT5eiA/7BXMYuClkz09cr3QxiTIudY8VTY06rzao8ltg+n41apdVjMRndmnFvFEW
 sNz2LdmB5gFh0dKKiqASafc8tM43XMnrBOhsIrZsGphQhB1c+Hkdkdbx2yN9Lqfs
 +ZtbZNCHgvFmJcr4awl3JowvW5W7SNnjTNQ0wZ4ak989a9YOEGiqdjRUQBMX3m0A
 qJ2LjzwFQAV00xK//dDBGS8GH2sN7kYj/3VRYOh4wOs9R27R8p2anzpBDgigo8jA
 9nZfgBtfwjb8ndjG6O2nGUw8XqqyQbvqt9U/uFDM6G7UL/EWi0JJEzXNsPC/Xidk
 eZmOFN1ZJ/vx8QTqTFWNEGjZLo8pwR9tyKvtiTx9OaoRvT1VC8UPBGgWZyuE/oQt
 0eu2vujCl/DrfqX+KY8SZkLjB8bSDfKwPAQnoQ01KQ/Onc+vU+TSLtz4nQAfYhyg
 eKUCKfbvE5nz9cTDL7igDJmyjDzAOSaXI4OlypjNKoKWLKgoB+CC5bZmZ7NvoWPm
 wEKEHt2eqptxS56n4HudT8J6kGwXdW1Hpq9pXCjNRNUoCON2Xqp3KxICqh0khTw3
 6JqSzeO/K4o76shdz3j8pbXbKQtwxAfXP3s1OvaHLygY/mgmTtYgZOx+FkOGpnYr
 QkSEmuDsZvAlOcBefQqX8E8MdkWsZ61GxS5aUj12DBoUqwehYuI=
 =CO9r
 -----END PGP SIGNATURE-----

Merge 5.4.201 into android11-5.4-lts

Changes in 5.4.201
	s390/mm: use non-quiescing sske for KVM switch to keyed guest
	dm: remove special-casing of bio-based immutable singleton target on NVMe
	usb: gadget: u_ether: fix regression in setting fixed MAC address
	tcp: add some entropy in __inet_hash_connect()
	tcp: use different parts of the port_offset for index and offset
	tcp: add small random increments to the source port
	tcp: dynamically allocate the perturb table used by source ports
	tcp: increase source port perturb table to 2^16
	tcp: drop the hash_32() part from the index calculation
	arm64: mm: Don't invalidate FROM_DEVICE buffers at start of DMA transfer
	Revert "hwmon: Make chip parameter for with_info API mandatory"
	Linux 5.4.201

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia0b51c88f15fe9e667d07e06fde3bc01787f365d
Committed by Greg Kroah-Hartman on 2022-06-25 16:13:44 +02:00 as commit d7a5d91fa5.
10 changed files with 53 additions and 119 deletions

Documentation/hwmon/hwmon-kernel-api.rst

@ -72,7 +72,7 @@ hwmon_device_register_with_info is the most comprehensive and preferred means
to register a hardware monitoring device. It creates the standard sysfs
attributes in the hardware monitoring core, letting the driver focus on reading
from and writing to the chip instead of having to bother with sysfs attributes.
The parent device parameter as well as the chip parameter must not be NULL. Its
The parent device parameter cannot be NULL with non-NULL chip info. Its
parameters are described in more detail below.
devm_hwmon_device_register_with_info is similar to
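As context for the paragraph above, a minimal sketch of a driver registering through the with_info API; this is illustrative only (the foo_* names, the single temperature channel, and the fixed 42000 millidegree reading are assumptions, not part of this patch):

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>

static umode_t foo_is_visible(const void *drvdata, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	return 0444;		/* expose everything read-only */
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	*val = 42000;		/* a real driver would read the chip here */
	return 0;
}

static const struct hwmon_channel_info *foo_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible	= foo_is_visible,
	.read		= foo_read,
};

static const struct hwmon_chip_info foo_chip_info = {
	.ops	= &foo_hwmon_ops,
	.info	= foo_info,
};

static int foo_probe(struct platform_device *pdev)
{
	struct device *hwmon_dev;

	/* with non-NULL chip info, the parent device must not be NULL */
	hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "foo",
							 NULL, &foo_chip_info,
							 NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}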

Makefile

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 200
SUBLEVEL = 201
EXTRAVERSION =
NAME = Kleptomaniac Octopus

arch/arm64/mm/cache.S

@ -228,8 +228,6 @@ ENDPIPROC(__dma_flush_area)
* - dir - DMA direction
*/
ENTRY(__dma_map_area)
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_area
b __dma_clean_area
ENDPIPROC(__dma_map_area)

arch/s390/mm/pgtable.c

@ -716,7 +716,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}

drivers/hwmon/hwmon.c

@ -715,12 +715,11 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
/**
* hwmon_device_register_with_info - register w/ hwmon
* @dev: the parent device (mandatory)
* @name: hwmon name attribute (mandatory)
* @drvdata: driver data to attach to created device (optional)
* @chip: pointer to hwmon chip information (mandatory)
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
* @chip: pointer to hwmon chip information
* @extra_groups: pointer to list of additional non-standard attribute groups
* (optional)
*
* hwmon_device_unregister() must be called when the device is no
* longer needed.
@ -733,10 +732,13 @@ hwmon_device_register_with_info(struct device *dev, const char *name,
const struct hwmon_chip_info *chip,
const struct attribute_group **extra_groups)
{
if (!dev || !name || !chip)
if (!name)
return ERR_PTR(-EINVAL);
if (!chip->ops || !chip->ops->is_visible || !chip->info)
if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
return ERR_PTR(-EINVAL);
if (chip && !dev)
return ERR_PTR(-EINVAL);
return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
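The restored checks above again accept a registration without chip info, which some existing drivers rely on. A minimal sketch of that call form, assuming a hypothetical foo driver (not taken from this patch):

/* no hwmon_chip_info: after this revert only the name is mandatory */
static int foo_register(struct device *dev)
{
	struct device *hwmon_dev;

	hwmon_dev = hwmon_device_register_with_info(dev, "foo", NULL, NULL, NULL);
	/* the caller must later call hwmon_device_unregister() */
	return PTR_ERR_OR_ZERO(hwmon_dev);
}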

drivers/md/dm-table.c

@ -874,8 +874,7 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
table_type == DM_TYPE_DAX_BIO_BASED ||
table_type == DM_TYPE_NVME_BIO_BASED);
table_type == DM_TYPE_DAX_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@ -931,8 +930,6 @@ bool dm_table_supports_dax(struct dm_table *t,
return true;
}
static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@ -962,7 +959,6 @@ static int dm_table_determine_type(struct dm_table *t)
goto verify_bio_based;
}
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
goto verify_rq_based;
}
@ -1001,15 +997,6 @@ verify_bio_based:
if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
} else {
/* Check if upgrading to NVMe bio-based is valid or required */
tgt = dm_table_get_immutable_target(t);
if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
t->type = DM_TYPE_NVME_BIO_BASED;
goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
t->type = DM_TYPE_NVME_BIO_BASED;
}
}
return 0;
}
@ -1026,8 +1013,7 @@ verify_rq_based:
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
DMERR("%s DM doesn't support multiple targets",
t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
DMERR("request-based DM doesn't support multiple targets");
return -EINVAL;
}
@ -1764,20 +1750,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
return q && !blk_queue_add_random(q);
}
static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
char b[BDEVNAME_SIZE];
/* For now, NVMe devices are the only devices of this class */
return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
}
static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
{
return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{

drivers/md/dm.c

@ -1028,7 +1028,7 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bio->bi_disk->queue->limits.max_discard_sectors)
disable_discard(md);
@ -1305,7 +1305,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
sector = clone->bi_iter.bi_sector;
if (unlikely(swap_bios_limit(ti, clone))) {
struct mapped_device *md = io->md;
int latch = get_swap_bios();
if (unlikely(latch != md->swap_bios))
__set_swap_bios_limit(md, latch);
@ -1320,24 +1319,17 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
bio_dev(io->orig_bio), sector);
if (md->type == DM_TYPE_NVME_BIO_BASED)
ret = direct_make_request(clone);
else
ret = generic_make_request(clone);
ret = generic_make_request(clone);
break;
case DM_MAPIO_KILL:
if (unlikely(swap_bios_limit(ti, clone))) {
struct mapped_device *md = io->md;
if (unlikely(swap_bios_limit(ti, clone)))
up(&md->swap_bios_semaphore);
}
free_tio(tio);
dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
if (unlikely(swap_bios_limit(ti, clone))) {
struct mapped_device *md = io->md;
if (unlikely(swap_bios_limit(ti, clone)))
up(&md->swap_bios_semaphore);
}
free_tio(tio);
dec_pending(io, BLK_STS_DM_REQUEUE);
break;
@ -1713,51 +1705,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret;
}
/*
* Optimized variant of __split_and_process_bio that leverages the
* fact that targets that use it do _not_ have a need to split bios.
*/
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
struct bio *bio, struct dm_target *ti)
{
struct clone_info ci;
blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
struct bio flush_bio;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else {
struct dm_target_io *tio;
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
if (__process_abnormal_io(&ci, ti, &error))
goto out;
tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
ret = __clone_and_map_simple_bio(&ci, tio, NULL);
}
out:
/* drop the extra reference count */
dec_pending(ci.io, errno_to_blk_status(error));
return ret;
}
static blk_qc_t dm_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
@ -1788,8 +1735,6 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
/* regular IO is split by __split_and_process_bio */
}
if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
return __process_bio(md, map, bio, ti);
return __split_and_process_bio(md, map, bio);
}
@ -2185,12 +2130,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (request_based)
dm_stop_queue(q);
if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
if (request_based) {
/*
* Leverage the fact that request-based DM targets and
* NVMe bio based targets are immutable singletons
* - used to optimize both dm_request_fn and dm_mq_queue_rq;
* and __process_bio.
* Leverage the fact that request-based DM targets are
* immutable singletons - used to optimize dm_mq_queue_rq.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
@ -2479,7 +2422,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
dm_init_congested_fn(md);
break;
case DM_TYPE_NONE:
@ -3222,7 +3164,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);

drivers/usb/gadget/function/u_ether.c

@ -772,9 +772,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
dev->qmult = qmult;
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
if (get_ether_addr(dev_addr, net->dev_addr))
if (get_ether_addr(dev_addr, net->dev_addr)) {
net->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(&g->dev,
"using random %s ethernet address\n", "self");
} else {
net->addr_assign_type = NET_ADDR_SET;
}
if (get_ether_addr(host_addr, dev->host_mac))
dev_warn(&g->dev,
"using random %s ethernet address\n", "host");
@ -831,6 +835,9 @@ struct net_device *gether_setup_name_default(const char *netname)
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
/* by default we always have a random MAC address */
net->addr_assign_type = NET_ADDR_RANDOM;
skb_queue_head_init(&dev->rx_frames);
/* network device setup */
@ -868,7 +875,6 @@ int gether_register_netdev(struct net_device *net)
g = dev->gadget;
memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
net->addr_assign_type = NET_ADDR_RANDOM;
status = register_netdev(net);
if (status < 0) {
@ -908,6 +914,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
memcpy(dev->dev_mac, new_addr, ETH_ALEN);
net->addr_assign_type = NET_ADDR_SET;
return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

include/linux/device-mapper.h

@ -29,7 +29,6 @@ enum dm_queue_mode {
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_DAX_BIO_BASED = 3,
DM_TYPE_NVME_BIO_BASED = 4,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

net/ipv4/inet_hashtables.c

@ -675,12 +675,14 @@ EXPORT_SYMBOL_GPL(inet_unhash);
* Note that we use 32bit integers (vs RFC 'short integers')
* because 2^16 is not a multiple of num_ephemeral and this
* property might be used by clever attacker.
* RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
* we use 256 instead to really give more isolation and
* privacy, this only consumes 1 KB of kernel memory.
* RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
* attacks were since demonstrated, thus we use 65536 instead to really
* give more isolation and privacy, at the expense of 256kB of kernel
* memory.
*/
#define INET_TABLE_PERTURB_SHIFT 8
static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
#define INET_TABLE_PERTURB_SHIFT 16
#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
static u32 *table_perturb;
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
@ -723,10 +725,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
if (likely(remaining > 1))
remaining &= ~1U;
net_get_random_once(table_perturb, sizeof(table_perturb));
index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
net_get_random_once(table_perturb,
INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
offset = READ_ONCE(table_perturb[index]) + port_offset;
offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
offset %= remaining;
/* In first pass we try ports of @low parity.
@ -782,6 +785,12 @@ next_port:
return -EADDRNOTAVAIL;
ok:
/* Here we want to add a little bit of randomness to the next source
* port that will be chosen. We use a max() with a random here so that
* on low contention the randomness is maximal and on high contention
* it may be inexistent.
*/
i = max_t(int, i, (prandom_u32() & 7) * 2);
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
@ -855,6 +864,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
low_limit,
high_limit);
init_hashinfo_lhash2(h);
/* this one is used for source ports of outgoing connections */
table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
sizeof(*table_perturb), GFP_KERNEL);
if (!table_perturb)
panic("TCP: failed to alloc table_perturb");
}
int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
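To make the combined effect of the source-port patches above easier to follow, here is a simplified stand-alone model in plain C; it is illustrative only (the parity handling, bind-conflict retries, and prandom_u32() of the real __inet_hash_connect are replaced by rand(), and the pick_port()/main() scaffolding is invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PERTURB_SHIFT 16			/* 2^16 entries, as in the patch */
#define PERTURB_SIZE  (1u << PERTURB_SHIFT)	/* 65536 * 4 bytes = 256 kB */

static uint32_t *table_perturb;

/* port_offset models the per-destination 64-bit secret hash */
static unsigned int pick_port(uint64_t port_offset,
			      unsigned int low, unsigned int remaining)
{
	/* low bits pick the table slot, upper 32 bits feed the start offset */
	uint32_t index = port_offset & (PERTURB_SIZE - 1);
	uint32_t offset = table_perturb[index] + (uint32_t)(port_offset >> 32);

	offset %= remaining;

	/* bump the slot by a small, partly random even amount so the next
	 * connect to the same destination starts from a different port */
	table_perturb[index] += 2 + 2 * (rand() & 7);

	return low + offset;
}

int main(void)
{
	/* dynamically allocated, mirroring the kmalloc_array() in the patch */
	table_perturb = calloc(PERTURB_SIZE, sizeof(*table_perturb));
	if (!table_perturb)
		return 1;

	for (int i = 0; i < 4; i++)
		printf("port %u\n",
		       pick_port(0x1122334455667788ULL, 32768, 28232));

	free(table_perturb);
	return 0;
}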