This is the 5.4.197 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmKdn9MACgkQONu9yGCS
 aT4bpg//X6xA3rkYaTO2ymBqhqwvZ47OvS5M6dL4N5wTKhkuGk89Ij4XDI/z65lm
 WeaRbVhvFbmnjm44lLvP8Ly5rHk/PTOVWqtGkbsFe05CStZb9VThJ9Eqrl2RfY1h
 QswWZe1V3QquBCMaXVP85VjvsUmqxRz1Sq1XL7u3DE56LRzzjgBBFO6Iu3+tfI4X
 6e1FOQec9nn2TRT3EQdYWy11EZawnoir5YF7wz1ao3epUu57xRLJogsaovA8Zu1l
 xJPpss0wro+9aB1VSwrpSggDtYqDOnarKBst0Q2i43wSPOlcjy7DcqYpnn9JaTKf
 zGchCQf/Zz0GrAuASqkwtc9ap3OhGNMv2x6RTtiH2MrKs8bH8/iQAhwwAvkgxQQg
 yAUw7DxB4L069dOYXoQQw+jPH1YIsqY4k6w9KtmyjcA8S5FJh2jHuhOw5lWL9y4+
 0Gz1LjoF59ZZZh49cNmKKuSmfFN3xO8CpWvz3qvd/2kjzus/dJXs7gq9+PS9uCC6
 e6SccW9vp0RZ9yhSy59xHuyhEZQdeU2Wp5RGcyUEN0mXdXFGaj0asWouu2H/gm1y
 coWZpcOlALk+uaP9+tYc+kLFCash98xhX10mNzb+nHODM/Mwxh+tzGbtOmQ8UuNX
 KkUqXjGz3AXQZzxKOUePvq9mrAFkEkdIKZDOjdwQvoEq+iQg9Lc=
 =JE5x
 -----END PGP SIGNATURE-----

Merge 5.4.197 into android11-5.4-lts

Changes in 5.4.197
	lockdown: also lock down previous kgdb use
	x86/pci/xen: Disable PCI/MSI[-X] masking for XEN_HVM guests
	staging: rtl8723bs: prevent ->Ssid overflow in rtw_wx_set_scan()
	Input: goodix - fix spurious key release events
	tcp: change source port randomization at connect() time
	secure_seq: use the 64 bits of the siphash for port offset calculation
	media: vim2m: Register video device after setting up internals
	media: vim2m: initialize the media device earlier
	ACPI: sysfs: Make sparse happy about address space in use
	ACPI: sysfs: Fix BERT error region memory mapping
	pinctrl: sunxi: fix f1c100s uart2 function
	net: af_key: check encryption module availability consistency
	net: ftgmac100: Disable hardware checksum on AST2600
	i2c: ismt: Provide a DMA buffer for Interrupt Cause Logging
	drivers: i2c: thunderx: Allow driver to work with ACPI defined TWSI controllers
	assoc_array: Fix BUG_ON during garbage collect
	cfg80211: set custom regdomain after wiphy registration
	drm/i915: Fix -Wstringop-overflow warning in call to intel_read_wm_latency()
	exec: Force single empty string when argv is empty
	netfilter: conntrack: re-fetch conntrack after insertion
	crypto: ecrdsa - Fix incorrect use of vli_cmp
	zsmalloc: fix races between asynchronous zspage free and page migration
	dm integrity: fix error code in dm_integrity_ctr()
	dm crypt: make printing of the key constant-time
	dm stats: add cond_resched when looping over entries
	dm verity: set DM_TARGET_IMMUTABLE feature flag
	raid5: introduce MD_BROKEN
	HID: multitouch: Add support for Google Whiskers Touchpad
	tpm: Fix buffer access in tpm2_get_tpm_pt()
	tpm: ibmvtpm: Correct the return value in tpm_ibmvtpm_probe()
	docs: submitting-patches: Fix crossref to 'The canonical patch format'
	NFS: Memory allocation failures are not server fatal errors
	NFSD: Fix possible sleep during nfsd4_release_lockowner()
	bpf: Enlarge offset check value to INT_MAX in bpf_skb_{load,store}_bytes
	Linux 5.4.197

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I1b3af44df22d11027b65b96eaac53fe2c3b5db92
Committed by Greg Kroah-Hartman on 2022-06-06 11:06:30 +02:00 in commit 9eae8fc396.
40 changed files with 327 additions and 97 deletions.


@@ -133,7 +133,7 @@ as you intend it to.
 The maintainer will thank you if you write your patch description in a
 form which can be easily pulled into Linux's source code management
-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
+system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
 
 Solve only one problem per patch.  If your description starts to get
 long, that's a sign that you probably need to split up your patch.


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 196
+SUBLEVEL = 197
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus


@@ -442,6 +442,11 @@ void __init xen_msi_init(void)
 	x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
 	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+	/*
+	 * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
+	 * controlled by the hypervisor.
+	 */
+	pci_msi_ignore_mask = 1;
 }
 #endif


@@ -112,15 +112,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
 	/* Step 1: verify that 0 < r < q, 0 < s < q */
 	if (vli_is_zero(r, ndigits) ||
-	    vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
+	    vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
 	    vli_is_zero(s, ndigits) ||
-	    vli_cmp(s, ctx->curve->n, ndigits) == 1)
+	    vli_cmp(s, ctx->curve->n, ndigits) >= 0)
 		return -EKEYREJECTED;
 
 	/* Step 2: calculate hash (h) of the message (passed as input) */
 	/* Step 3: calculate e = h \mod q */
 	vli_from_le64(e, digest, ndigits);
-	if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
+	if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
 		vli_sub(e, e, ctx->curve->n, ndigits);
 	if (vli_is_zero(e, ndigits))
 		e[0] = 1;
@@ -136,7 +136,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
 	/* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
 	ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
 			      ctx->curve);
-	if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
+	if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
 		vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
 	/* Step 7: if R == r signature is valid */
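
The fix hinges on vli_cmp() being a three-way compare that returns -1, 0 or 1: testing for == 1 only rejects r > q and silently accepts the boundary case r == q, while the signature rules require 0 < r < q. A minimal userspace sketch of the same off-by-one (cmp() below is a stand-in with vli_cmp()'s contract, not the kernel function):

	#include <stdio.h>

	/* three-way compare with the same contract as the kernel's vli_cmp() */
	static int cmp(unsigned int a, unsigned int b)
	{
		return (a > b) - (a < b);
	}

	int main(void)
	{
		unsigned int r = 17, q = 17;	/* boundary case: r == q */

		printf("old check rejects r == q: %d\n", cmp(r, q) == 1);	/* 0: wrongly accepted */
		printf("new check rejects r == q: %d\n", cmp(r, q) >= 0);	/* 1: rejected */
		return 0;
	}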


@@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
 {
 	struct acpi_data_attr *data_attr;
 	void __iomem *base;
-	ssize_t rc;
+	ssize_t size;
 
 	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
+	size = data_attr->attr.size;
 
-	base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
+	if (offset < 0)
+		return -EINVAL;
+
+	if (offset >= size)
+		return 0;
+
+	if (count > size - offset)
+		count = size - offset;
+
+	base = acpi_os_map_iomem(data_attr->addr, size);
 	if (!base)
 		return -ENOMEM;
-	rc = memory_read_from_buffer(buf, count, &offset, base,
-				     data_attr->attr.size);
-	acpi_os_unmap_memory(base, data_attr->attr.size);
 
-	return rc;
+	memcpy_fromio(buf, base + offset, count);
+
+	acpi_os_unmap_iomem(base, size);
+
+	return count;
 }
 
 static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)


@@ -706,7 +706,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
 	if (!rc) {
 		out = (struct tpm2_get_cap_out *)
 			&buf.data[TPM_HEADER_SIZE];
-		*value = be32_to_cpu(out->value);
+		/*
+		 * To prevent failing boot up of some systems, Infineon TPM2.0
+		 * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
+		 * the TPM2_Getcapability command returns a zero length list
+		 * in field upgrade mode.
+		 */
+		if (be32_to_cpu(out->property_cnt) > 0)
+			*value = be32_to_cpu(out->value);
+		else
+			rc = -ENODATA;
 	}
 	tpm_buf_destroy(&buf);
 	return rc;


@@ -685,6 +685,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
 	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
 				ibmvtpm->rtce_buf != NULL,
 				HZ)) {
+		rc = -ENODEV;
 		dev_err(dev, "CRQ response timed out\n");
 		goto init_irq_cleanup;
 	}


@@ -2822,7 +2822,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
 }
 
 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
-				  u16 wm[8])
+				  u16 wm[])
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;


@@ -2158,6 +2158,9 @@ static const struct hid_device_id mt_devices[] = {
 	{ .driver_data = MT_CLS_GOOGLE,
 		HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
 			USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
+	{ .driver_data = MT_CLS_GOOGLE,
+		HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
+			USB_DEVICE_ID_GOOGLE_WHISKERS) },
 
 	/* Generic MT device */
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },


@@ -81,6 +81,7 @@
 #define ISMT_DESC_ENTRIES	2	/* number of descriptor entries */
 #define ISMT_MAX_RETRIES	3	/* number of SMBus retries to attempt */
+#define ISMT_LOG_ENTRIES	3	/* number of interrupt cause log entries */
 
 /* Hardware Descriptor Constants - Control Field */
 #define ISMT_DESC_CWRL	0x01	/* Command/Write Length */
@@ -174,6 +175,8 @@ struct ismt_priv {
 	u8 head;				/* ring buffer head pointer */
 	struct completion cmp;			/* interrupt completion */
 	u8 buffer[I2C_SMBUS_BLOCK_MAX + 16];	/* temp R/W data buffer */
+	dma_addr_t log_dma;
+	u32 *log;
 };
 
 /**
@@ -408,6 +411,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
 	memset(desc, 0, sizeof(struct ismt_desc));
 	desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
 
+	/* Always clear the log entries */
+	memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
+
 	/* Initialize common control bits */
 	if (likely(pci_dev_msi_enabled(priv->pci_dev)))
 		desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
@@ -697,6 +703,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
 	/* initialize the Master Descriptor Base Address (MDBA) */
 	writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
 
+	writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
+
 	/* initialize the Master Control Register (MCTRL) */
 	writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
@@ -784,6 +792,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
 	priv->head = 0;
 	init_completion(&priv->cmp);
 
+	priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
+					ISMT_LOG_ENTRIES * sizeof(u32),
+					&priv->log_dma, GFP_KERNEL);
+	if (!priv->log)
+		return -ENOMEM;
+
 	return 0;
 }


@@ -208,6 +208,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
 	i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
 	i2c->adap.dev.parent = dev;
 	i2c->adap.dev.of_node = pdev->dev.of_node;
+	i2c->adap.dev.fwnode = dev->fwnode;
 	snprintf(i2c->adap.name, sizeof(i2c->adap.name),
 		 "Cavium ThunderX i2c adapter at %s", dev_name(dev));
 	i2c_set_adapdata(&i2c->adap, i2c);


@@ -335,7 +335,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
 	 * The Goodix panel will send spurious interrupts after a
 	 * 'finger up' event, which will always cause a timeout.
 	 */
-	return 0;
+	return -ENOMSG;
 }
 
 static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)


@@ -2817,6 +2817,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_SUBMITTED;
 }
 
+static char hex2asc(unsigned char c)
+{
+	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+}
+
 static void crypt_status(struct dm_target *ti, status_type_t type,
 			 unsigned status_flags, char *result, unsigned maxlen)
 {
@@ -2835,9 +2840,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		if (cc->key_size > 0) {
 			if (cc->key_string)
 				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
-			else
-				for (i = 0; i < cc->key_size; i++)
-					DMEMIT("%02x", cc->key[i]);
+			else {
+				for (i = 0; i < cc->key_size; i++) {
+					DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
+					       hex2asc(cc->key[i] & 0xf));
+				}
+			}
 		} else
 			DMEMIT("-");
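
hex2asc() maps a nibble to its ASCII hex digit with no data-dependent branch or table lookup, so the time spent printing no longer depends on the key bytes: for c in 0..9 the term (9 - c) >> 4 is 0 and the result is c + '0'; for c in 10..15 the unsigned subtraction wraps, the shift leaves bits whose AND with 0x27 gives 39, landing exactly on 'a'..'f'. A standalone userspace sanity check of the helper (not part of the patch):

	#include <stdio.h>

	static char hex2asc(unsigned char c)
	{
		return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
	}

	int main(void)
	{
		for (unsigned char c = 0; c < 16; c++)
			putchar(hex2asc(c));	/* prints 0123456789abcdef */
		putchar('\n');
		return 0;
	}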


@@ -4149,8 +4149,6 @@ try_smaller_buffer:
 	}
 
 	if (should_write_sb) {
-		int r;
-
 		init_journal(ic, 0, ic->journal_sections, 0);
 		r = dm_integrity_failed(ic);
 		if (unlikely(r)) {


@@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
 				       atomic_read(&shared->in_flight[READ]),
 				       atomic_read(&shared->in_flight[WRITE]));
 			}
+			cond_resched();
 		}
 		dm_stat_free(&s->rcu_head);
 	}
@@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 	for (ni = 0; ni < n_entries; ni++) {
 		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
 		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+		cond_resched();
 	}
 
 	if (s->n_histogram_entries) {
@@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 		for (ni = 0; ni < n_entries; ni++) {
 			s->stat_shared[ni].tmp.histogram = hi;
 			hi += s->n_histogram_entries + 1;
+			cond_resched();
 		}
 	}
@@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 			for (ni = 0; ni < n_entries; ni++) {
 				p[ni].histogram = hi;
 				hi += s->n_histogram_entries + 1;
+				cond_resched();
 			}
 		}
 	}
@@ -474,6 +478,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
 			}
 			DMEMIT("\n");
 		}
+		cond_resched();
 	}
 	mutex_unlock(&stats->mutex);
@@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
 				local_irq_enable();
 			}
 		}
+		cond_resched();
 	}
 }
@@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
 
 		if (unlikely(sz + 1 >= maxlen))
 			goto buffer_overflow;
+
+		cond_resched();
 	}
 
 	if (clear)
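
Every hunk above applies one pattern: a loop whose trip count scales with a user-supplied number of entries gets a cond_resched() so it cannot monopolize a CPU and trip the soft-lockup watchdog. A hedged kernel-style sketch of that pattern; the struct and per-entry work below are placeholders, not dm-stats code:

	#include <linux/sched.h>

	struct example_entry {
		unsigned long counter;
	};

	static void walk_entries(struct example_entry *e, size_t n_entries)
	{
		size_t ni;

		for (ni = 0; ni < n_entries; ni++) {
			e[ni].counter = 0;	/* stand-in for per-entry work */
			cond_resched();		/* yield between entries on huge tables */
		}
	}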


@@ -1221,6 +1221,7 @@ bad:
 
 static struct target_type verity_target = {
 	.name		= "verity",
+	.features	= DM_TARGET_IMMUTABLE,
 	.version	= {1, 5, 0},
 	.module		= THIS_MODULE,
 	.ctr		= verity_ctr,


@@ -609,17 +609,17 @@ int raid5_calc_degraded(struct r5conf *conf)
 	return degraded;
 }
 
-static int has_failed(struct r5conf *conf)
+static bool has_failed(struct r5conf *conf)
 {
-	int degraded;
+	int degraded = conf->mddev->degraded;
 
-	if (conf->mddev->reshape_position == MaxSector)
-		return conf->mddev->degraded > conf->max_degraded;
+	if (test_bit(MD_BROKEN, &conf->mddev->flags))
+		return true;
 
-	degraded = raid5_calc_degraded(conf);
-	if (degraded > conf->max_degraded)
-		return 1;
-	return 0;
+	if (conf->mddev->reshape_position != MaxSector)
+		degraded = raid5_calc_degraded(conf);
+
+	return degraded > conf->max_degraded;
 }
 
 struct stripe_head *
@@ -2679,34 +2679,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 	unsigned long flags;
 	pr_debug("raid456: error called\n");
 
+	pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
+		mdname(mddev), bdevname(rdev->bdev, b));
+
 	spin_lock_irqsave(&conf->device_lock, flags);
-
-	if (test_bit(In_sync, &rdev->flags) &&
-	    mddev->degraded == conf->max_degraded) {
-		/*
-		 * Don't allow to achieve failed state
-		 * Don't try to recover this device
-		 */
-		conf->recovery_disabled = mddev->recovery_disabled;
-		spin_unlock_irqrestore(&conf->device_lock, flags);
-		return;
-	}
-
 	set_bit(Faulty, &rdev->flags);
 	clear_bit(In_sync, &rdev->flags);
 	mddev->degraded = raid5_calc_degraded(conf);
+
+	if (has_failed(conf)) {
+		set_bit(MD_BROKEN, &conf->mddev->flags);
+		conf->recovery_disabled = mddev->recovery_disabled;
+
+		pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
+			mdname(mddev), mddev->degraded, conf->raid_disks);
+	} else {
+		pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
+			mdname(mddev), conf->raid_disks - mddev->degraded);
+	}
+
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 
 	set_bit(Blocked, &rdev->flags);
 	set_mask_bits(&mddev->sb_flags, 0,
 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
-	pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
-		"md/raid:%s: Operation continuing on %d devices.\n",
-		mdname(mddev),
-		bdevname(rdev->bdev, b),
-		mdname(mddev),
-		conf->raid_disks - mddev->degraded);
 	r5c_update_on_rdev_error(mddev, rdev);
 }


@@ -1333,12 +1333,6 @@ static int vim2m_probe(struct platform_device *pdev)
 	vfd->lock = &dev->dev_mutex;
 	vfd->v4l2_dev = &dev->v4l2_dev;
 
-	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
-	if (ret) {
-		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
-		goto error_v4l2;
-	}
-
 	video_set_drvdata(vfd, dev);
 	v4l2_info(&dev->v4l2_dev,
 			"Device registered as /dev/video%d\n", vfd->num);
@@ -1361,12 +1355,20 @@ static int vim2m_probe(struct platform_device *pdev)
 	media_device_init(&dev->mdev);
 	dev->mdev.ops = &m2m_media_ops;
 	dev->v4l2_dev.mdev = &dev->mdev;
+#endif
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+		goto error_m2m;
+	}
 
+#ifdef CONFIG_MEDIA_CONTROLLER
 	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
 						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
 	if (ret) {
 		v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
-		goto error_dev;
+		goto error_v4l2;
 	}
 
 	ret = media_device_register(&dev->mdev);
@@ -1381,11 +1383,13 @@ static int vim2m_probe(struct platform_device *pdev)
 error_m2m_mc:
 	v4l2_m2m_unregister_media_controller(dev->m2m_dev);
 #endif
-error_dev:
+error_v4l2:
 	video_unregister_device(&dev->vfd);
 	/* vim2m_device_release called by video_unregister_device to release various objects */
 	return ret;
-error_v4l2:
+error_m2m:
 	v4l2_m2m_release(dev->m2m_dev);
+error_dev:
 	v4l2_device_unregister(&dev->v4l2_dev);
 error_free:
 	kfree(dev);


@@ -1880,6 +1880,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
 	/* AST2400 doesn't have working HW checksum generation */
 	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
 		netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+	/* AST2600 tx checksum with NCSI is broken */
+	if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+		netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
 	if (np && of_get_property(np, "no-hw-checksum", NULL))
 		netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
 	netdev->features |= netdev->hw_features;


@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "lcd"),		/* D20 */
-		  SUNXI_FUNCTION(0x3, "lvds1"),		/* RX */
+		  SUNXI_FUNCTION(0x3, "uart2"),		/* RX */
 		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),


@@ -1351,9 +1351,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
 			sec_len = *(pos++); len-= 1;
 
-			if (sec_len>0 && sec_len<=len) {
+			if (sec_len > 0 &&
+			    sec_len <= len &&
+			    sec_len <= 32) {
 				ssid[ssid_index].SsidLength = sec_len;
-				memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+				memcpy(ssid[ssid_index].Ssid, pos, sec_len);
 				/* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
 				/* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */
 				ssid_index++;
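
The root cause: sec_len is attacker-controlled and the old check bounded it only against the remaining input, never against the destination array, which the new sec_len <= 32 test implies is 32 bytes. A hedged userspace sketch of the corrected pattern (names and sizes are illustrative, not the driver's own):

	#include <string.h>

	#define SSID_MAX 32	/* assumed from the sec_len <= 32 check added above */

	struct ssid_buf {
		unsigned char Ssid[SSID_MAX];
		unsigned int SsidLength;
	};

	static int copy_ssid(struct ssid_buf *dst, const unsigned char *pos,
			     unsigned int sec_len, unsigned int len)
	{
		if (sec_len == 0 || sec_len > len || sec_len > SSID_MAX)
			return -1;	/* bound by the destination, not just the input */
		dst->SsidLength = sec_len;
		memcpy(dst->Ssid, pos, sec_len);
		return 0;
	}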


@@ -454,6 +454,9 @@ static int prepare_arg_pages(struct linux_binprm *bprm,
 	unsigned long limit, ptr_size;
 
 	bprm->argc = count(argv, MAX_ARG_STRINGS);
+	if (bprm->argc == 0)
+		pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
+			     current->comm, bprm->filename);
 	if (bprm->argc < 0)
 		return bprm->argc;
@@ -482,8 +485,14 @@ static int prepare_arg_pages(struct linux_binprm *bprm,
 	 * the stack. They aren't stored until much later when we can't
 	 * signal to the parent that the child has run out of stack space.
 	 * Instead, calculate it here so it's possible to fail gracefully.
+	 *
+	 * In the case of argc = 0, make sure there is space for adding a
+	 * empty string (which will bump argc to 1), to ensure confused
+	 * userspace programs don't start processing from argv[1], thinking
+	 * argc can never be 0, to keep them from walking envp by accident.
+	 * See do_execveat_common().
 	 */
-	ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+	ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
 	if (limit <= ptr_size)
 		return -E2BIG;
 	limit -= ptr_size;
@@ -1832,6 +1841,20 @@ static int __do_execve_file(int fd, struct filename *filename,
 	if (retval < 0)
 		goto out;
 
+	/*
+	 * When argv is empty, add an empty string ("") as argv[0] to
+	 * ensure confused userspace programs that start processing
+	 * from argv[1] won't end up walking envp. See also
+	 * bprm_stack_limits().
+	 */
+	if (bprm->argc == 0) {
+		const char *argv[] = { "", NULL };
+		retval = copy_strings_kernel(1, argv, bprm);
+		if (retval < 0)
+			goto out;
+		bprm->argc = 1;
+	}
+
 	retval = exec_binprm(bprm);
 	if (retval < 0)
 		goto out;
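
With this applied, a program exec'd with an empty argv observes argc == 1 and argv[0] == "" instead of argv[0] aliasing envp[0]. A hedged userspace demo, not part of the patch, that re-executes itself through /proc/self/exe with an empty argv:

	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char *argv[])
	{
		if (argc >= 1 && argv[0][0] != '\0') {
			/* first run: re-exec ourselves with an empty argv */
			char *empty_argv[] = { NULL };
			execve("/proc/self/exe", empty_argv, NULL);
			perror("execve");
			return 1;
		}
		/* on a fixed kernel this prints: child: argc=1 argv[0]="" */
		printf("child: argc=%d argv[0]=\"%s\"\n", argc, argc ? argv[0] : "(null)");
		return 0;
	}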


@@ -775,6 +775,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
+	case -ENOMEM:
 		return false;
 	}
 	return nfs_error_is_fatal(err);


@@ -6894,16 +6894,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
 			continue;
 
-		/* see if there are still any locks associated with it */
-		lo = lockowner(sop);
-		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
-			if (check_for_locks(stp->st_stid.sc_file, lo)) {
-				status = nfserr_locks_held;
-				spin_unlock(&clp->cl_lock);
-				return status;
-			}
+		if (atomic_read(&sop->so_count) != 1) {
+			spin_unlock(&clp->cl_lock);
+			return nfserr_locks_held;
 		}
 
+		lo = lockowner(sop);
 		nfs4_get_stateowner(sop);
 		break;
 	}


@@ -118,10 +118,12 @@ enum lockdown_reason {
 	LOCKDOWN_MMIOTRACE,
 	LOCKDOWN_DEBUGFS,
 	LOCKDOWN_XMON_WR,
+	LOCKDOWN_DBG_WRITE_KERNEL,
 	LOCKDOWN_INTEGRITY_MAX,
 	LOCKDOWN_KCORE,
 	LOCKDOWN_KPROBES,
 	LOCKDOWN_BPF_READ,
+	LOCKDOWN_DBG_READ_KERNEL,
 	LOCKDOWN_PERF,
 	LOCKDOWN_TRACEFS,
 	LOCKDOWN_XMON_RW,


@@ -420,7 +420,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
 }
 
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-			struct sock *sk, u32 port_offset,
+			struct sock *sk, u64 port_offset,
 			int (*check_established)(struct inet_timewait_death_row *,
 						 struct sock *, __u16,
 						 struct inet_timewait_sock **));


@@ -59,8 +59,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 	int ret = NF_ACCEPT;
 
 	if (ct) {
-		if (!nf_ct_is_confirmed(ct))
+		if (!nf_ct_is_confirmed(ct)) {
 			ret = __nf_conntrack_confirm(skb);
+
+			if (ret == NF_ACCEPT)
+				ct = (struct nf_conn *)skb_nfct(skb);
+		}
+
 		if (likely(ret == NF_ACCEPT))
 			nf_ct_deliver_cached_events(ct);
 	}


@@ -4,8 +4,8 @@
 
 #include <linux/types.h>
 
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
 			       __be16 dport);
 u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
 		   __be16 sport, __be16 dport);


@@ -56,6 +56,7 @@
 #include <linux/vmacache.h>
 #include <linux/rcupdate.h>
 #include <linux/irq.h>
+#include <linux/security.h>
 
 #include <asm/cacheflush.h>
 #include <asm/byteorder.h>
@@ -685,6 +686,29 @@ cpu_master_loop:
 				continue;
 			kgdb_connected = 0;
 		} else {
+			/*
+			 * This is a brutal way to interfere with the debugger
+			 * and prevent gdb being used to poke at kernel memory.
+			 * This could cause trouble if lockdown is applied when
+			 * there is already an active gdb session. For now the
+			 * answer is simply "don't do that". Typically lockdown
+			 * *will* be applied before the debug core gets started
+			 * so only developers using kgdb for fairly advanced
+			 * early kernel debug can be biten by this. Hopefully
+			 * they are sophisticated enough to take care of
+			 * themselves, especially with help from the lockdown
+			 * message printed on the console!
+			 */
+			if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
+				if (IS_ENABLED(CONFIG_KGDB_KDB)) {
+					/* Switch back to kdb if possible... */
+					dbg_kdb_mode = 1;
+					continue;
+				} else {
+					/* ... otherwise just bail */
+					break;
+				}
+			}
 			error = gdb_serial_stub(ks);
 		}


@@ -45,6 +45,7 @@
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/security.h>
 #include "kdb_private.h"
 
 #undef	MODULE_PARAM_PREFIX
@@ -198,10 +199,62 @@ struct task_struct *kdb_curr_task(int cpu)
 }
 
 /*
- * Check whether the flags of the current command and the permissions
- * of the kdb console has allow a command to be run.
+ * Update the permissions flags (kdb_cmd_enabled) to match the
+ * current lockdown state.
+ *
+ * Within this function the calls to security_locked_down() are "lazy". We
+ * avoid calling them if the current value of kdb_cmd_enabled already excludes
+ * flags that might be subject to lockdown. Additionally we deliberately check
+ * the lockdown flags independently (even though read lockdown implies write
+ * lockdown) since that results in both simpler code and clearer messages to
+ * the user on first-time debugger entry.
+ *
+ * The permission masks during a read+write lockdown permits the following
+ * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE).
+ *
+ * The INSPECT commands are not blocked during lockdown because they are
+ * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes
+ * forcing them to have no arguments) and lsmod. These commands do expose
+ * some kernel state but do not allow the developer seated at the console to
+ * choose what state is reported. SIGNAL and REBOOT should not be controversial,
+ * given these are allowed for root during lockdown already.
  */
-static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+static void kdb_check_for_lockdown(void)
+{
+	const int write_flags = KDB_ENABLE_MEM_WRITE |
+				KDB_ENABLE_REG_WRITE |
+				KDB_ENABLE_FLOW_CTRL;
+	const int read_flags = KDB_ENABLE_MEM_READ |
+			       KDB_ENABLE_REG_READ;
+
+	bool need_to_lockdown_write = false;
+	bool need_to_lockdown_read = false;
+
+	if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags))
+		need_to_lockdown_write =
+			security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL);
+
+	if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags))
+		need_to_lockdown_read =
+			security_locked_down(LOCKDOWN_DBG_READ_KERNEL);
+
+	/* De-compose KDB_ENABLE_ALL if required */
+	if (need_to_lockdown_write || need_to_lockdown_read)
+		if (kdb_cmd_enabled & KDB_ENABLE_ALL)
+			kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
+
+	if (need_to_lockdown_write)
+		kdb_cmd_enabled &= ~write_flags;
+
+	if (need_to_lockdown_read)
+		kdb_cmd_enabled &= ~read_flags;
+}
+
+/*
+ * Check whether the flags of the current command, the permissions of the kdb
+ * console and the lockdown state allow a command to be run.
+ */
+static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
 			    bool no_args)
 {
 	/* permissions comes from userspace so needs massaging slightly */
@@ -1188,6 +1241,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
 		kdb_curr_task(raw_smp_processor_id());
 
 	KDB_DEBUG_STATE("kdb_local 1", reason);
+
+	kdb_check_for_lockdown();
+
 	kdb_go_count = 0;
 	if (reason == KDB_REASON_DEBUG) {
 		/* special case below */


@@ -1462,6 +1462,7 @@ int assoc_array_gc(struct assoc_array *array,
 	struct assoc_array_ptr *cursor, *ptr;
 	struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
 	unsigned long nr_leaves_on_tree;
+	bool retained;
 	int keylen, slot, nr_free, next_slot, i;
 
 	pr_devel("-->%s()\n", __func__);
@@ -1538,6 +1539,7 @@ continue_node:
 		goto descend;
 	}
 
+retry_compress:
 	pr_devel("-- compress node %p --\n", new_n);
 
 	/* Count up the number of empty slots in this node and work out the
@@ -1555,6 +1557,7 @@ continue_node:
 	pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
 
 	/* See what we can fold in */
+	retained = false;
 	next_slot = 0;
 	for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
 		struct assoc_array_shortcut *s;
@@ -1604,9 +1607,14 @@ continue_node:
 			pr_devel("[%d] retain node %lu/%d [nx %d]\n",
 				 slot, child->nr_leaves_on_branch, nr_free + 1,
 				 next_slot);
+			retained = true;
 		}
 	}
 
+	if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+		pr_devel("internal nodes remain despite enough space, retrying\n");
+		goto retry_compress;
+	}
 	pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
 
 	nr_leaves_on_tree = new_n->nr_leaves_on_branch;


@@ -1748,11 +1748,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
  */
 static void lock_zspage(struct zspage *zspage)
 {
-	struct page *page = get_first_page(zspage);
+	struct page *curr_page, *page;
 
-	do {
-		lock_page(page);
-	} while ((page = get_next_page(page)) != NULL);
+	/*
+	 * Pages we haven't locked yet can be migrated off the list while we're
+	 * trying to lock them, so we need to be careful and only attempt to
+	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
+	 * may no longer belong to the zspage. This means that we may wait for
+	 * the wrong page to unlock, so we must take a reference to the page
+	 * prior to waiting for it to unlock outside migrate_read_lock().
+	 */
+	while (1) {
+		migrate_read_lock(zspage);
+		page = get_first_page(zspage);
+		if (trylock_page(page))
+			break;
+		get_page(page);
+		migrate_read_unlock(zspage);
+		wait_on_page_locked(page);
+		put_page(page);
+	}
+
+	curr_page = page;
+	while ((page = get_next_page(curr_page))) {
+		if (trylock_page(page)) {
+			curr_page = page;
+		} else {
+			get_page(page);
+			migrate_read_unlock(zspage);
+			wait_on_page_locked(page);
+			put_page(page);
+			migrate_read_lock(zspage);
+		}
+	}
+	migrate_read_unlock(zspage);
 }
 
 static int zs_init_fs_context(struct fs_context *fc)
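
The locking discipline the new code follows, reduced to the first page: never sleep waiting for a page lock while the page can migrate away; pin it with a reference first, drop the migration lock, wait, then retry from scratch. A condensed sketch under those assumptions (function names match the APIs used in the hunk above; the caller receives the page locked with migrate_read_lock() still held):

	static struct page *lock_first_page_pinned(struct zspage *zspage)
	{
		struct page *page;

		while (1) {
			migrate_read_lock(zspage);
			page = get_first_page(zspage);
			if (trylock_page(page))
				return page;	/* locked, migrate_read_lock still held */

			get_page(page);			/* pin: struct page stays valid */
			migrate_read_unlock(zspage);	/* never sleep holding the lock */
			wait_on_page_locked(page);
			put_page(page);			/* page may have moved: retry */
		}
	}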


@@ -1668,7 +1668,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
 	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
 		return -EINVAL;
-	if (unlikely(offset > 0xffff))
+	if (unlikely(offset > INT_MAX))
 		return -EFAULT;
 	if (unlikely(bpf_try_make_writable(skb, offset + len)))
 		return -EFAULT;
@@ -1703,7 +1703,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
 {
 	void *ptr;
 
-	if (unlikely(offset > 0xffff))
+	if (unlikely(offset > INT_MAX))
 		goto err_clear;
 
 	ptr = skb_header_pointer(skb, offset, len, to);
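
The old 0xffff guard rejected valid offsets into large (for example GRO-aggregated) packets; the actual requirement is only that offset stay within signed-int range, because it feeds arithmetic such as offset + len that is handled as int further down. A toy userspace comparison of the two guards (not kernel code):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int offset = 70000;	/* plausible in a packet larger than 64 KiB */

		printf("old guard rejects: %d\n", offset > 0xffff);	/* 1: bogus -EFAULT */
		printf("new guard rejects: %d\n", offset > INT_MAX);	/* 0: allowed */
		return 0;
	}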


@@ -97,7 +97,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
 }
 EXPORT_SYMBOL(secure_tcpv6_seq);
 
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
 			       __be16 dport)
 {
 	const struct {
@@ -147,7 +147,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
 }
 EXPORT_SYMBOL_GPL(secure_tcp_seq);
 
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
 	net_secret_init();
 	return siphash_4u32((__force u32)saddr, (__force u32)daddr,


@@ -464,7 +464,7 @@ not_unique:
 	return -EADDRNOTAVAIL;
 }
 
-static u32 inet_sk_port_offset(const struct sock *sk)
+static u64 inet_sk_port_offset(const struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 
@@ -671,8 +671,19 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(inet_unhash);
 
+/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
+ * Note that we use 32bit integers (vs RFC 'short integers')
+ * because 2^16 is not a multiple of num_ephemeral and this
+ * property might be used by clever attacker.
+ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
+ * we use 256 instead to really give more isolation and
+ * privacy, this only consumes 1 KB of kernel memory.
+ */
+#define INET_TABLE_PERTURB_SHIFT 8
+static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
+
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-		struct sock *sk, u32 port_offset,
+		struct sock *sk, u64 port_offset,
 		int (*check_established)(struct inet_timewait_death_row *,
 			struct sock *, __u16, struct inet_timewait_sock **))
 {
@@ -684,8 +695,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	struct inet_bind_bucket *tb;
 	u32 remaining, offset;
 	int ret, i, low, high;
-	static u32 hint;
 	int l3mdev;
+	u32 index;
 
 	if (port) {
 		head = &hinfo->bhash[inet_bhashfn(net, port,
@@ -712,7 +723,12 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	if (likely(remaining > 1))
 		remaining &= ~1U;
 
-	offset = (hint + port_offset) % remaining;
+	net_get_random_once(table_perturb, sizeof(table_perturb));
+	index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+
+	offset = READ_ONCE(table_perturb[index]) + port_offset;
+	offset %= remaining;
+
 	/* In first pass we try ports of @low parity.
 	 * inet_csk_get_port() does the opposite choice.
 	 */
@@ -766,7 +782,7 @@ next_port:
 	return -EADDRNOTAVAIL;
 
 ok:
-	hint += i + 2;
+	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
 	/* Head lock still held and bh's disabled */
 	inet_bind_hash(sk, tb, port);
@@ -789,7 +805,7 @@ ok:
 int inet_hash_connect(struct inet_timewait_death_row *death_row,
 		      struct sock *sk)
 {
-	u32 port_offset = 0;
+	u64 port_offset = 0;
 
 	if (!inet_sk(sk)->inet_num)
 		port_offset = inet_sk_port_offset(sk);
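
Putting the pieces together: the 64-bit siphash of the destination tuple now selects both a slot in table_perturb[] and the starting offset within the ephemeral range, and a successful connect() bumps only that slot, so one observed source port no longer leaks a global hint. A toy userspace model of the selection; pick_port() is illustrative only, and the kernel additionally probes for free ports and derives the index via hash_32():

	#include <stdint.h>
	#include <stdio.h>

	#define PERTURB_SHIFT 8
	static uint32_t table_perturb[1 << PERTURB_SHIFT];

	static uint16_t pick_port(uint64_t port_offset, uint16_t low, uint16_t high)
	{
		uint32_t remaining = high - low;
		uint32_t index = (uint32_t)port_offset & ((1 << PERTURB_SHIFT) - 1);
		uint32_t offset = (table_perturb[index] + (uint32_t)port_offset) % remaining;

		table_perturb[index] += 2;	/* advance this bucket for the next connect() */
		return low + offset;
	}

	int main(void)
	{
		uint64_t h = 0x9e3779b97f4a7c15ull;	/* stand-in for the siphash value */

		/* same destination twice: same bucket, different ports */
		printf("%u %u\n", pick_port(h, 32768, 60999), pick_port(h, 32768, 60999));
		return 0;
	}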


@@ -262,7 +262,7 @@ not_unique:
 	return -EADDRNOTAVAIL;
 }
 
-static u32 inet6_sk_port_offset(const struct sock *sk)
+static u64 inet6_sk_port_offset(const struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 
@@ -274,7 +274,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk)
 int inet6_hash_connect(struct inet_timewait_death_row *death_row,
 		       struct sock *sk)
 {
-	u32 port_offset = 0;
+	u64 port_offset = 0;
 
 	if (!inet_sk(sk)->inet_num)
 		port_offset = inet6_sk_port_offset(sk);


@@ -2904,7 +2904,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
 			break;
 		if (!aalg->pfkey_supported)
 			continue;
-		if (aalg_tmpl_set(t, aalg))
+		if (aalg_tmpl_set(t, aalg) && aalg->available)
 			sz += sizeof(struct sadb_comb);
 	}
 	return sz + sizeof(struct sadb_prop);
@@ -2922,7 +2922,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
 		if (!ealg->pfkey_supported)
 			continue;
 
-		if (!(ealg_tmpl_set(t, ealg)))
+		if (!(ealg_tmpl_set(t, ealg) && ealg->available))
 			continue;
 
 		for (k = 1; ; k++) {
@@ -2933,7 +2933,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
 			if (!aalg->pfkey_supported)
 				continue;
 
-			if (aalg_tmpl_set(t, aalg))
+			if (aalg_tmpl_set(t, aalg) && aalg->available)
 				sz += sizeof(struct sadb_comb);
 		}
 	}


@@ -5,7 +5,7 @@
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017	Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -906,9 +906,6 @@ int wiphy_register(struct wiphy *wiphy)
 		return res;
 	}
 
-	/* set up regulatory info */
-	wiphy_regulatory_register(wiphy);
-
 	list_add_rcu(&rdev->list, &cfg80211_rdev_list);
 	cfg80211_rdev_list_generation++;
 
@@ -919,6 +916,9 @@ int wiphy_register(struct wiphy *wiphy)
 	cfg80211_debugfs_rdev_add(rdev);
 	nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
 
+	/* set up regulatory info */
+	wiphy_regulatory_register(wiphy);
+
 	if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
 		struct regulatory_request request;


@@ -3792,6 +3792,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
 
 	wiphy_update_regulatory(wiphy, lr->initiator);
 	wiphy_all_share_dfs_chan_state(wiphy);
+	reg_process_self_managed_hints();
 }
 
 void wiphy_regulatory_deregister(struct wiphy *wiphy)


@@ -33,10 +33,12 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
 	[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
 	[LOCKDOWN_DEBUGFS] = "debugfs access",
 	[LOCKDOWN_XMON_WR] = "xmon write access",
+	[LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
 	[LOCKDOWN_INTEGRITY_MAX] = "integrity",
 	[LOCKDOWN_KCORE] = "/proc/kcore access",
 	[LOCKDOWN_KPROBES] = "use of kprobes",
 	[LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+	[LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
 	[LOCKDOWN_PERF] = "unsafe use of perf",
 	[LOCKDOWN_TRACEFS] = "use of tracefs",
 	[LOCKDOWN_XMON_RW] = "xmon read and write access",