This is the 5.4.82 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl/PSigACgkQONu9yGCS
aT6bSw//eDCpWcnLDa1Rt4bOrnO82484ebr1PZeYPfca/3QVS59j8DsVOf6Xklmz
z2ponI6SRFxZwO2SmXrfoiOhUVI9Kd3ohTH+LSo3ezpk0klamIf60L914RBc7QFE
wmVgOPz5LwLxfkU5a148/H4rwLGlM9oBxVcCXpnLkN03Ul4JM/P6A/T3rFrX8ZkW
3r4NYu3jOHgNz+irosW8zAea+jIf7ALg4Gch3ILwrbM4KSQiyXbAp0mJsY+li7HE
BSa1RJHBXkqCwK/mWT4LWuJNf871T656kKr04/rxipRu2lEcGCPghO4DGba1mjqR
NdnuMWBjoxetlRAbWOylWT+2ngQNx+E9hFrBxg1+js/mcHvfpeM4EuSK4YCnI7rO
6r5JZqYdw7GGHqvy51JPLx1m+NMt8XhTp5+1vOIZhjtdNrcTMBz0kxIiGbvTwdlb
BbO+LDjmBmQYwmTcadbBPPMRLKnvx5bbNtTAzdwkvYEC8ev5RfxebFO/StTbmVRd
JIUKkwmNw803OjhMgs+dXVw0lX8C1nLSSROKHf4+lCGFhCDnDhos5DpKpfBIwXxP
Xv0Uf1YA4ygFVId+kuJOoXWNBkzB6UOlKMxoU1YcuRwpZHFk8b+MvTAzaCbSSl3A
nJT6CK3K3H6WSiF9PC8i85kFJbAJbwifjx904nGBekaqU0bgI+s=
=Faec
-----END PGP SIGNATURE-----

Merge 5.4.82 into android11-5.4-lts

Changes in 5.4.82
	devlink: Hold rtnl lock while reading netdev attributes
	ipv6: addrlabel: fix possible memory leak in ip6addrlbl_net_init
	net/af_iucv: set correct sk_protocol for child sockets
	net/tls: missing received data after fast remote close
	net/tls: Protect from calling tls_dev_del for TLS RX twice
	rose: Fix Null pointer dereference in rose_send_frame()
	sock: set sk_err to ee_errno on dequeue from errq
	tcp: Set INET_ECN_xmit configuration in tcp_reinit_congestion_control
	tun: honor IOCB_NOWAIT flag
	usbnet: ipheth: fix connectivity with iOS 14
	bonding: wait for sysfs kobject destruction before freeing struct slave
	staging/octeon: fix up merge error
	ima: extend boot_aggregate with kernel measurements
	sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list
	netfilter: bridge: reset skb->pkt_type after NF_INET_POST_ROUTING traversal
	ipv4: Fix tos mask in inet_rtm_getroute()
	dt-bindings: net: correct interrupt flags in examples
	chelsio/chtls: fix panic during unload reload chtls
	ibmvnic: Ensure that SCRQ entry reads are correctly ordered
	ibmvnic: Fix TX completion error handling
	inet_ecn: Fix endianness of checksum update when setting ECT(1)
	geneve: pull IP header before ECN decapsulation
	net: ip6_gre: set dev->hard_header_len when using header_ops
	net/x25: prevent a couple of overflows
	cxgb3: fix error return code in t3_sge_alloc_qset()
	net: pasemi: fix error return code in pasemi_mac_open()
	vxlan: fix error return code in __vxlan_dev_create()
	chelsio/chtls: fix a double free in chtls_setkey()
	net: mvpp2: Fix error return code in mvpp2_open()
	net: skbuff: ensure LSE is pullable before decrementing the MPLS ttl
	net: openvswitch: ensure LSE is pullable before reading it
	net/sched: act_mpls: ensure LSE is pullable before reading it
	net/mlx5: DR, Proper handling of unsupported Connect-X6DX SW steering
	net/mlx5: Fix wrong address reclaim when command interface is down
	ALSA: usb-audio: US16x08: fix value count for level meters
	Input: xpad - support Ardwiino Controllers
	Input: i8042 - add ByteSpeed touchpad to noloop table
	tracing: Remove WARN_ON in start_thread()
	RDMA/i40iw: Address an mmap handler exploit in i40iw
	Linux 5.4.82

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie7c035895e3413f7a58012c372cfc64deb2e6081
This commit is contained in: commit 5a742c2b56
@@ -33,7 +33,7 @@ tcan4x5x: tcan4x5x@0 {
 		spi-max-frequency = <10000000>;
 		bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
 		interrupt-parent = <&gpio1>;
-		interrupts = <14 GPIO_ACTIVE_LOW>;
+		interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
 		device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 		device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
 		reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
 		clock-frequency = <100000>;

 		interrupt-parent = <&gpio1>;
-		interrupts = <29 GPIO_ACTIVE_HIGH>;
+		interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;

 		enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
 		firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2):
 		clock-frequency = <400000>;

 		interrupt-parent = <&gpio1>;
-		interrupts = <17 GPIO_ACTIVE_HIGH>;
+		interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;

 		enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 		firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 81
+SUBLEVEL = 82
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -1077,6 +1077,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	sk_setup_caps(newsk, dst);
 	ctx = tls_get_ctx(lsk);
 	newsk->sk_destruct = ctx->sk_destruct;
+	newsk->sk_prot_creator = lsk->sk_prot_creator;
 	csk->sk = newsk;
 	csk->passive_reap_next = oreq;
 	csk->tx_chan = cxgb4_port_chan(ndev);
@@ -365,6 +365,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
 	csk->wr_unacked += DIV_ROUND_UP(len, 16);
 	enqueue_wr(csk, skb);
 	cxgb4_ofld_send(csk->egress_dev, skb);
+	skb = NULL;

 	chtls_set_scmd(csk);
 	/* Clear quiesce for Rx key */
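The added `skb = NULL;` is the standard defence against a double free: after cxgb4_ofld_send() the offload queue owns the skb, and a later error path in the same function must not free it again. A minimal userspace sketch of the idiom (hypothetical names, not the chtls code):

	#include <stdlib.h>

	struct queue { void *slot; };

	static void enqueue(struct queue *q, void *buf)
	{
		q->slot = buf;		/* queue now owns buf */
	}

	int send_key(struct queue *q)
	{
		char *skb = malloc(64);
		if (!skb)
			return -1;
		enqueue(q, skb);	/* ownership transferred */
		skb = NULL;		/* mirrors the fix: drop our reference */
		/* ... later error/cleanup path ... */
		free(skb);		/* free(NULL) is a no-op: no double free */
		return 0;
	}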
@@ -54,10 +54,6 @@
 #define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
 static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1588,7 +1584,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 	if (status)
 		goto exit;
 	iwdev->obj_next = iwdev->obj_mem;
-	iwdev->push_mode = push_mode;

 	init_waitqueue_head(&iwdev->vchnl_waitq);
 	init_waitqueue_head(&dev->vf_reqs);
@@ -168,38 +168,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	struct i40iw_ucontext *ucontext;
-	u64 db_addr_offset;
-	u64 push_offset;
+	struct i40iw_ucontext *ucontext = to_ucontext(context);
+	u64 dbaddr;

-	ucontext = to_ucontext(context);
-	if (ucontext->iwdev->sc_dev.is_pf) {
-		db_addr_offset = I40IW_DB_ADDR_OFFSET;
-		push_offset = I40IW_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
-	} else {
-		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
-		push_offset = I40IW_VF_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
-	}
+	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;

-	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+	dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);

-	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		vma->vm_private_data = ucontext;
-	} else {
-		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		else
-			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	}
-
-	if (io_remap_pfn_range(vma, vma->vm_start,
-			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-			       PAGE_SIZE, vma->vm_page_prot))
+	if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+			       pgprot_noncached(vma->vm_page_prot)))
 		return -EAGAIN;

 	return 0;
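The rewrite above closes the exploit by refusing any mmap request other than the single doorbell page at offset 0. A hedged userspace sketch of just that validation step (constants assumed, not the driver code):

	#define PAGE_SIZE 4096UL

	/* Accept only a one-page mapping at page offset 0, as the fixed
	 * i40iw_mmap() does before remapping the doorbell page.
	 */
	static int mmap_request_ok(unsigned long pgoff,
				   unsigned long vm_start, unsigned long vm_end)
	{
		if (pgoff || vm_end - vm_start != PAGE_SIZE)
			return -1;	/* the kernel returns -EINVAL here */
		return 0;
	}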
@@ -241,6 +241,7 @@ static const struct xpad_device {
 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -418,6 +419,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
 	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
+	XPAD_XBOX360_VENDOR(0x1209),		/* Ardwiino Controllers */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
@@ -219,6 +219,10 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
 		},
+	}, {
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+		},
 	},
 	{ }
 };
@@ -1293,29 +1293,9 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
 	slave->dev->flags &= ~IFF_SLAVE;
 }

-static struct slave *bond_alloc_slave(struct bonding *bond)
-{
-	struct slave *slave = NULL;
-
-	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
-	if (!slave)
-		return NULL;
-
-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
-		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
-					       GFP_KERNEL);
-		if (!SLAVE_AD_INFO(slave)) {
-			kfree(slave);
-			return NULL;
-		}
-	}
-	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
-
-	return slave;
-}
-
-static void bond_free_slave(struct slave *slave)
+static void slave_kobj_release(struct kobject *kobj)
 {
+	struct slave *slave = to_slave(kobj);
 	struct bonding *bond = bond_get_bond_by_slave(slave);

 	cancel_delayed_work_sync(&slave->notify_work);
@@ -1325,6 +1305,53 @@ static void bond_free_slave(struct slave *slave)
 	kfree(slave);
 }

+static struct kobj_type slave_ktype = {
+	.release = slave_kobj_release,
+#ifdef CONFIG_SYSFS
+	.sysfs_ops = &slave_sysfs_ops,
+#endif
+};
+
+static int bond_kobj_init(struct slave *slave)
+{
+	int err;
+
+	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+				   &(slave->dev->dev.kobj), "bonding_slave");
+	if (err)
+		kobject_put(&slave->kobj);
+
+	return err;
+}
+
+static struct slave *bond_alloc_slave(struct bonding *bond,
+				      struct net_device *slave_dev)
+{
+	struct slave *slave = NULL;
+
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave)
+		return NULL;
+
+	slave->bond = bond;
+	slave->dev = slave_dev;
+
+	if (bond_kobj_init(slave))
+		return NULL;
+
+	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
+					       GFP_KERNEL);
+		if (!SLAVE_AD_INFO(slave)) {
+			kobject_put(&slave->kobj);
+			return NULL;
+		}
+	}
+	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
+	return slave;
+}
+
 static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
 {
 	info->bond_mode = BOND_MODE(bond);
@@ -1508,14 +1535,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 		goto err_undo_flags;
 	}

-	new_slave = bond_alloc_slave(bond);
+	new_slave = bond_alloc_slave(bond, slave_dev);
 	if (!new_slave) {
 		res = -ENOMEM;
 		goto err_undo_flags;
 	}

-	new_slave->bond = bond;
-	new_slave->dev = slave_dev;
 	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
 	 * is set via sysfs or module option if desired.
 	 */
@@ -1837,7 +1862,7 @@ err_restore_mtu:
 	dev_set_mtu(slave_dev, new_slave->original_mtu);

 err_free:
-	bond_free_slave(new_slave);
+	kobject_put(&new_slave->kobj);

 err_undo_flags:
 	/* Enslave of first slave has failed and we need to fix master's mac */
@@ -2017,7 +2042,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 	if (!netif_is_bond_master(slave_dev))
 		slave_dev->priv_flags &= ~IFF_BONDING;

-	bond_free_slave(slave);
+	kobject_put(&slave->kobj);

 	return 0;
 }
@@ -121,7 +121,6 @@ static const struct slave_attribute *slave_attrs[] = {
 };

 #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
-#define to_slave(obj)	container_of(obj, struct slave, kobj)

 static ssize_t slave_show(struct kobject *kobj,
 			  struct attribute *attr, char *buf)
@@ -132,28 +131,15 @@ static ssize_t slave_show(struct kobject *kobj,
 	return slave_attr->show(slave, buf);
 }

-static const struct sysfs_ops slave_sysfs_ops = {
+const struct sysfs_ops slave_sysfs_ops = {
 	.show = slave_show,
 };

-static struct kobj_type slave_ktype = {
-#ifdef CONFIG_SYSFS
-	.sysfs_ops = &slave_sysfs_ops,
-#endif
-};
-
 int bond_sysfs_slave_add(struct slave *slave)
 {
 	const struct slave_attribute **a;
 	int err;

-	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
-				   &(slave->dev->dev.kobj), "bonding_slave");
-	if (err) {
-		kobject_put(&slave->kobj);
-		return err;
-	}
-
 	for (a = slave_attrs; *a; ++a) {
 		err = sysfs_create_file(&slave->kobj, &((*a)->attr));
 		if (err) {
@@ -171,6 +157,4 @@ void bond_sysfs_slave_del(struct slave *slave)

 	for (a = slave_attrs; *a; ++a)
 		sysfs_remove_file(&slave->kobj, &((*a)->attr));
-
-	kobject_put(&slave->kobj);
 }
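The bonding series above exists because bond_free_slave() called kfree() directly while sysfs could still hold a reference to the slave's kobject; the fix routes destruction through the kobject release callback instead. A hedged userspace analogue of that lifetime rule (a plain, single-threaded refcount standing in for the kobject core, which uses atomic krefs):

	#include <stdlib.h>

	struct slave {
		int refcount;		/* stands in for slave->kobj's refcount */
		/* ... per-slave state ... */
	};

	static void slave_release(struct slave *s)
	{
		free(s);		/* only reachable after the last put */
	}

	/* a sysfs reader would take a reference around each access */
	static void slave_get(struct slave *s)
	{
		s->refcount++;
	}

	static void slave_put(struct slave *s)
	{
		if (--s->refcount == 0)
			slave_release(s);	/* like slave_kobj_release() */
	}

Replacing bond_free_slave()'s direct kfree() with kobject_put() means the memory lives until the last such reader drops its reference.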
@@ -3176,6 +3176,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 			  GFP_KERNEL | __GFP_COMP);
 	if (!avail) {
 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+		ret = -ENOMEM;
 		goto err;
 	}
 	if (avail < q->fl[0].size)
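This is one of four patches in this release (cxgb3, pasemi, vxlan, mvpp2) fixing the same bug class: an error branch reaches the unwind label with the return variable still 0, so callers see success. A minimal sketch of the pattern (hypothetical function, not driver code):

	#include <errno.h>
	#include <stdlib.h>

	static int open_device(void)
	{
		int ret = 0;
		void *ring = malloc(4096);

		if (!ring) {
			ret = -ENOMEM;	/* the fix: without this assignment */
			goto err;	/* the function would report 0 (success) */
		}
		/* ... bring the device up ... */
		free(ring);
		return 0;
	err:
		return ret;
	}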
@@ -2307,6 +2307,12 @@ restart_poll:

 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
 			break;
+		/* The queue entry at the current index is peeked at above
+		 * to determine that there is a valid descriptor awaiting
+		 * processing. We want to be sure that the current slot
+		 * holds a valid descriptor before reading its contents.
+		 */
+		dma_rmb();
 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
 		rx_buff =
 			(struct ibmvnic_rx_buff *)be64_to_cpu(next->
@@ -2988,13 +2994,18 @@ restart_loop:
 		unsigned int pool = scrq->pool_index;
 		int num_entries = 0;

+		/* The queue entry at the current index is peeked at above
+		 * to determine that there is a valid descriptor awaiting
+		 * processing. We want to be sure that the current slot
+		 * holds a valid descriptor before reading its contents.
+		 */
+		dma_rmb();
+
 		next = ibmvnic_next_scrq(adapter, scrq);
 		for (i = 0; i < next->tx_comp.num_comps; i++) {
-			if (next->tx_comp.rcs[i]) {
+			if (next->tx_comp.rcs[i])
 				dev_err(dev, "tx error %x\n",
 					next->tx_comp.rcs[i]);
-				continue;
-			}
 			index = be32_to_cpu(next->tx_comp.correlators[i]);
 			if (index & IBMVNIC_TSO_POOL_MASK) {
 				tx_pool = &adapter->tso_pool[pool];
@@ -3388,6 +3399,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
 	}
 	spin_unlock_irqrestore(&scrq->lock, flags);

+	/* Ensure that the entire buffer descriptor has been
+	 * loaded before reading its contents
+	 */
+	dma_rmb();
+
 	return entry;
 }
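All three ibmvnic hunks insert the same read barrier between the "descriptor is valid" check and the loads of the descriptor body. A hedged userspace analogue using C11 acquire semantics in place of dma_rmb() (simplified single-producer layout, not the driver's ring format):

	#include <stdatomic.h>

	struct desc {
		atomic_int valid;	/* set by the producer after the payload */
		int payload;
	};

	/* Consumer: the acquire load orders the payload read after the flag
	 * check, which is what dma_rmb() guarantees between pending_scrq()
	 * and ibmvnic_next_scrq() in the patched driver.
	 */
	static int read_desc(struct desc *d, int *out)
	{
		if (!atomic_load_explicit(&d->valid, memory_order_acquire))
			return 0;	/* nothing to consume yet */
		*out = d->payload;
		return 1;
	}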
@@ -3696,6 +3696,7 @@ static int mvpp2_open(struct net_device *dev)
 	if (!valid) {
 		netdev_err(port->dev,
 			   "invalid configuration: no dt or link IRQ");
+		err = -ENOENT;
 		goto err_free_irq;
 	}

@@ -339,6 +339,24 @@ out_free:
 	return err;
 }

+static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
+				     u32 npages)
+{
+	u32 pages_set = 0;
+	unsigned int n;
+
+	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
+		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
+				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
+		pages_set++;
+
+		if (!--npages)
+			break;
+	}
+
+	return pages_set;
+}
+
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 			     u32 *in, int in_size, u32 *out, int out_size)
 {
@@ -362,8 +380,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 		if (fwp->func_id != func_id)
 			continue;

-		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
-		i++;
+		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
 	}

 	MLX5_SET(manage_pages_out, out, output_num_entries, i);
@@ -92,6 +92,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 	caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
 	caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
 	caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+	caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);

 	if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
@@ -223,6 +223,11 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 	if (ret)
 		return ret;

+	if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
+		mlx5dr_err(dmn, "SW steering is not supported on this device\n");
+		return -EOPNOTSUPP;
+	}
+
 	ret = dr_domain_query_fdb_caps(mdev, dmn);
 	if (ret)
 		return ret;
@@ -613,6 +613,7 @@ struct mlx5dr_cmd_caps {
 	u8 max_ft_level;
 	u16 roce_min_src_udp;
 	u8 num_esw_ports;
+	u8 sw_format_ver;
 	bool eswitch_manager;
 	bool rx_sw_owner;
 	bool tx_sw_owner;
@@ -1078,16 +1078,20 @@ static int pasemi_mac_open(struct net_device *dev)

 	mac->tx = pasemi_mac_setup_tx_resources(dev);

-	if (!mac->tx)
+	if (!mac->tx) {
+		ret = -ENOMEM;
 		goto out_tx_ring;
+	}

 	/* We might already have allocated rings in case mtu was changed
 	 * before interface was brought up.
 	 */
 	if (dev->mtu > 1500 && !mac->num_cs) {
 		pasemi_mac_setup_csrings(mac);
-		if (!mac->num_cs)
+		if (!mac->num_cs) {
+			ret = -ENOMEM;
 			goto out_tx_ring;
+		}
 	}

 	/* Zero out rmon counters */
@@ -254,11 +254,21 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
 	skb_dst_set(skb, &tun_dst->dst);

 	/* Ignore packet loops (and multicast echo) */
-	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
-		geneve->dev->stats.rx_errors++;
-		goto drop;
-	}
+	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+		goto rx_error;

+	switch (skb_protocol(skb, true)) {
+	case htons(ETH_P_IP):
+		if (pskb_may_pull(skb, sizeof(struct iphdr)))
+			goto rx_error;
+		break;
+	case htons(ETH_P_IPV6):
+		if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+			goto rx_error;
+		break;
+	default:
+		goto rx_error;
+	}
 	oiph = skb_network_header(skb);
 	skb_reset_network_header(skb);
@@ -299,6 +309,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
 		u64_stats_update_end(&stats->syncp);
 	}
 	return;
+rx_error:
+	geneve->dev->stats.rx_errors++;
 drop:
 	/* Consume bad packet */
 	kfree_skb(skb);
@@ -2028,12 +2028,15 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun = tun_get(tfile);
 	ssize_t result;
+	int noblock = 0;

 	if (!tun)
 		return -EBADFD;

-	result = tun_get_user(tun, tfile, NULL, from,
-			      file->f_flags & O_NONBLOCK, false);
+	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+		noblock = 1;
+
+	result = tun_get_user(tun, tfile, NULL, from, noblock, false);

 	tun_put(tun);
 	return result;
@@ -2254,10 +2257,15 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun = tun_get(tfile);
 	ssize_t len = iov_iter_count(to), ret;
+	int noblock = 0;

 	if (!tun)
 		return -EBADFD;
-	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
+
+	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+		noblock = 1;
+
+	ret = tun_do_read(tun, tfile, to, noblock, NULL);
 	ret = min_t(ssize_t, ret, len);
 	if (ret > 0)
 		iocb->ki_pos = ret;
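For context, IOCB_NOWAIT is not something a tun user sets directly; it typically arrives via preadv2()/pwritev2() with RWF_NOWAIT (or via io_uring on the caller's behalf). A hedged usage sketch, assuming tunfd is an open /dev/net/tun descriptor:

	#define _GNU_SOURCE
	#include <sys/uio.h>
	#include <errno.h>

	/* Attempt a non-blocking read from a tun fd that was NOT opened
	 * O_NONBLOCK. Before this fix, the RWF_NOWAIT hint was ignored and
	 * the read could block.
	 */
	ssize_t read_packet_nowait(int tunfd, void *buf, size_t len)
	{
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		ssize_t n = preadv2(tunfd, &iov, 1, -1, RWF_NOWAIT);

		if (n < 0 && errno == EAGAIN)
			return 0;	/* no packet queued right now */
		return n;
	}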
@@ -59,7 +59,7 @@
 #define IPHETH_USBINTF_SUBCLASS 253
 #define IPHETH_USBINTF_PROTO    1

-#define IPHETH_BUF_SIZE         1516
+#define IPHETH_BUF_SIZE         1514
 #define IPHETH_IP_ALIGN         2	/* padding at front of URB */
 #define IPHETH_TX_TIMEOUT       (5 * HZ)
@@ -3617,8 +3617,10 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,

 	if (dst->remote_ifindex) {
 		remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
-		if (!remote_dev)
+		if (!remote_dev) {
+			err = -ENODEV;
 			goto errout;
+		}

 		err = netdev_upper_dev_link(remote_dev, dev, extack);
 		if (err)
@@ -352,10 +352,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_dst_set(skb, NULL);
 	skb_ext_reset(skb);
 	nf_reset_ct(skb);
+	skb_reset_redirect(skb);

 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;
-	skb_reset_tc(skb);
 #endif /* CONFIG_NET_SCHED */
 #endif /* REUSE_SKBUFFS_WITHOUT_FREE */
@@ -1139,6 +1139,11 @@ enum mlx5_fc_bulk_alloc_bitmask {

 #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))

+enum {
+	MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
+	MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_0[0x30];
 	u8         vhca_id[0x10];
@@ -1419,7 +1424,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {

 	u8         general_obj_types[0x40];

-	u8         reserved_at_440[0x20];
+	u8         reserved_at_440[0x4];
+	u8         steering_format_version[0x4];
+	u8         create_qp_start_hint[0x18];

 	u8         reserved_at_460[0x3];
 	u8         log_max_uctx[0x5];
@@ -180,6 +180,11 @@ struct slave {
 	struct rtnl_link_stats64 slave_stats;
 };

+static inline struct slave *to_slave(struct kobject *kobj)
+{
+	return container_of(kobj, struct slave, kobj);
+}
+
 struct bond_up_slave {
 	unsigned int	count;
 	struct rcu_head rcu;
@@ -743,6 +748,9 @@ extern struct bond_parm_tbl ad_select_tbl[];
 /* exported from bond_netlink.c */
 extern struct rtnl_link_ops bond_link_ops;

+/* exported from bond_sysfs_slave.c */
+extern const struct sysfs_ops slave_sysfs_ops;
+
 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
 {
 	atomic_long_inc(&dev->tx_dropped);
@@ -107,7 +107,7 @@ static inline int IP_ECN_set_ect1(struct iphdr *iph)
 	if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
 		return 0;

-	check += (__force u16)htons(0x100);
+	check += (__force u16)htons(0x1);

 	iph->check = (__force __sum16)(check + (check>=0xFFFF));
 	iph->tos ^= INET_ECN_MASK;
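The one-byte change above is easier to see with RFC 1624 incremental-update arithmetic: tos is the low byte of the big-endian {version/ihl, tos} word, so ECT(0) (0b10) to ECT(1) (0b01) lowers that 16-bit word by 1 on the wire, and the ones'-complement checksum field must rise by htons(0x1), not htons(0x100). A hedged, self-contained demonstration (toy 20-byte header, not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	/* ones'-complement sum over n 16-bit words, final complement */
	static uint16_t csum(const uint16_t *p, int n)
	{
		uint32_t s = 0;
		while (n--)
			s += *p++;
		while (s >> 16)
			s = (s & 0xffff) + (s >> 16);
		return (uint16_t)~s;
	}

	int main(void)
	{
		uint16_t hdr[10] = { htons(0x4502) }; /* ver 4, ihl 5, tos ECT(0) */

		hdr[5] = csum(hdr, 10);		/* valid checksum field */

		/* incremental update, as in the fixed IP_ECN_set_ect1() */
		uint32_t check = hdr[5];
		check += (uint16_t)htons(0x1);
		hdr[5] = (uint16_t)(check + (check >= 0xffff));
		hdr[0] ^= htons(0x3);		/* tos ^= INET_ECN_MASK */

		/* verify: recomputing from scratch matches the incremental result */
		uint16_t field = hdr[5];
		hdr[5] = 0;
		puts(csum(hdr, 10) == field ? "checksum ok" : "checksum MISMATCH");
		return 0;
	}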
@@ -222,6 +222,12 @@ enum tls_context_flags {
 	 * to be atomic.
 	 */
 	TLS_TX_SYNC_SCHED = 1,
+	/* tls_dev_del was called for the RX side, device state was released,
+	 * but tls_ctx->netdev might still be kept, because TX-side driver
+	 * resources might not be released yet. Used to prevent the second
+	 * tls_dev_del call in tls_device_down if it happens simultaneously.
+	 */
+	TLS_RX_DEV_CLOSED = 2,
 };

 struct cipher_context {
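The new TLS_RX_DEV_CLOSED bit is a one-shot guard: whichever path tears down RX device state first records the fact, and the other path skips its tls_dev_del() call. A hedged analogue of the pattern with C11 atomics (not the tls code):

	#include <stdatomic.h>

	static atomic_flag rx_closed = ATOMIC_FLAG_INIT;

	/* Both the socket-close path and the netdev-down path may call this;
	 * the flag guarantees the release logic runs at most once.
	 */
	static void rx_device_release_once(void)
	{
		if (atomic_flag_test_and_set(&rx_closed))
			return;		/* the other path got here first */
		/* ... release RX offload state exactly once ... */
	}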
@@ -4608,7 +4608,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	int enqueue = 1;
 	long task_delta, idle_task_delta;

 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4632,21 +4631,41 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
-			enqueue = 0;
+			break;
 		cfs_rq = cfs_rq_of(se);
-		if (enqueue)
-			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
 		cfs_rq->idle_h_nr_running += idle_task_delta;

 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
-			break;
+			goto unthrottle_throttle;
 	}

-	if (!se)
-		add_nr_running(rq, task_delta);
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
+
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto unthrottle_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
+	}
+
+	/* At this point se is NULL and we are at root level*/
+	add_nr_running(rq, task_delta);

+unthrottle_throttle:
 	/*
 	 * The cfs_rq_throttled() breaks in the above iteration can result in
 	 * incomplete leaf list maintenance, resulting in triggering the
@@ -4655,7 +4674,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);

-		list_add_leaf_cfs_rq(cfs_rq);
+		if (list_add_leaf_cfs_rq(cfs_rq))
+			break;
 	}

 	assert_list_leaf_cfs_rq(rq);
@@ -355,7 +355,7 @@ static int start_kthread(struct trace_array *tr)
 	struct task_struct *kthread;
 	int next_cpu;

-	if (WARN_ON(hwlat_kthread))
+	if (hwlat_kthread)
 		return 0;

 	/* Just pick the first CPU on first iteration */
@@ -735,6 +735,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
 	mtu_reserved = nf_bridge_mtu_reduction(skb);
 	mtu = skb->dev->mtu;

+	if (nf_bridge->pkt_otherhost) {
+		skb->pkt_type = PACKET_OTHERHOST;
+		nf_bridge->pkt_otherhost = false;
+	}
+
 	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
 		mtu = nf_bridge->frag_max_size;

@@ -835,8 +840,6 @@ static unsigned int br_nf_post_routing(void *priv,
 	else
 		return NF_ACCEPT;

-	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
-	 * about the value of skb->pkt_type. */
 	if (skb->pkt_type == PACKET_OTHERHOST) {
 		skb->pkt_type = PACKET_HOST;
 		nf_bridge->pkt_otherhost = true;
@@ -562,6 +562,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
 	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
 		goto nla_put_failure;

+	/* Hold rtnl lock while accessing port's netdev attributes. */
+	rtnl_lock();
 	spin_lock_bh(&devlink_port->type_lock);
 	if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
 		goto nla_put_failure_type_locked;
@@ -588,6 +590,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
 			goto nla_put_failure_type_locked;
 	}
 	spin_unlock_bh(&devlink_port->type_lock);
+	rtnl_unlock();
 	if (devlink_nl_port_attrs_put(msg, devlink_port))
 		goto nla_put_failure;

@@ -596,6 +599,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,

 nla_put_failure_type_locked:
 	spin_unlock_bh(&devlink_port->type_lock);
+	rtnl_unlock();
 nla_put_failure:
 	genlmsg_cancel(msg, hdr);
 	return -EMSGSIZE;
@@ -4546,7 +4546,7 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 	if (skb && (skb_next = skb_peek(q))) {
 		icmp_next = is_icmp_err_skb(skb_next);
 		if (icmp_next)
-			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
 	}
 	spin_unlock_irqrestore(&q->lock, flags);

@@ -5712,6 +5712,9 @@ int skb_mpls_dec_ttl(struct sk_buff *skb)
 	if (unlikely(!eth_p_mpls(skb->protocol)))
 		return -EINVAL;

+	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
+		return -ENOMEM;
+
 	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
 	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
 	if (!--ttl)
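Three patches in this release (skbuff, openvswitch, act_mpls) add the same guard: verify that the 4-byte MPLS label stack entry is actually present in the linear skb data before dereferencing mpls_hdr(). A hedged userspace sketch of the check, with a plain buffer in place of an skb:

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	#define MPLS_HLEN 4
	#define MPLS_LS_TTL_MASK 0xff	/* TTL is the low byte of the LSE */

	/* Return the MPLS TTL, or -1 if the buffer is too short to hold an
	 * LSE at net_off -- the case pskb_may_pull() now rejects.
	 */
	static int mpls_ttl(const uint8_t *pkt, size_t len, size_t net_off)
	{
		uint32_t lse;

		if (len < net_off + MPLS_HLEN)
			return -1;
		memcpy(&lse, pkt + net_off, sizeof(lse));
		return ntohl(lse) & MPLS_LS_TTL_MASK;
	}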
@@ -3132,7 +3132,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,

 	fl4.daddr = dst;
 	fl4.saddr = src;
-	fl4.flowi4_tos = rtm->rtm_tos;
+	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
 	fl4.flowi4_mark = mark;
 	fl4.flowi4_uid = uid;
@@ -3156,8 +3156,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		fl4.flowi4_iif = iif; /* for rt_fill_info */
 		skb->dev	= dev;
 		skb->mark	= mark;
-		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
-					 dev, &res);
+		err = ip_route_input_rcu(skb, dst, src,
+					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
+					 &res);

 		rt = skb_rtable(skb);
 		if (err == 0 && rt->dst.error)
@@ -197,6 +197,11 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 	icsk->icsk_ca_setsockopt = 1;
 	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

+	if (ca->flags & TCP_CONG_NEEDS_ECN)
+		INET_ECN_xmit(sk);
+	else
+		INET_ECN_dontxmit(sk);
+
 	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
 		tcp_init_congestion_control(sk);
 }
@@ -306,7 +306,9 @@ static int ip6addrlbl_del(struct net *net,
 /* add default label */
 static int __net_init ip6addrlbl_net_init(struct net *net)
 {
-	int err = 0;
+	struct ip6addrlbl_entry *p = NULL;
+	struct hlist_node *n;
+	int err;
 	int i;

 	ADDRLABEL(KERN_DEBUG "%s\n", __func__);
@@ -315,14 +317,20 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
 	INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head);

 	for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
-		int ret = ip6addrlbl_add(net,
-					 ip6addrlbl_init_table[i].prefix,
-					 ip6addrlbl_init_table[i].prefixlen,
-					 0,
-					 ip6addrlbl_init_table[i].label, 0);
-		/* XXX: should we free all rules when we catch an error? */
-		if (ret && (!err || err != -ENOMEM))
-			err = ret;
+		err = ip6addrlbl_add(net,
+				     ip6addrlbl_init_table[i].prefix,
+				     ip6addrlbl_init_table[i].prefixlen,
+				     0,
+				     ip6addrlbl_init_table[i].label, 0);
+		if (err)
+			goto err_ip6addrlbl_add;
 	}
 	return 0;
+
+err_ip6addrlbl_add:
+	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
+		hlist_del_rcu(&p->list);
+		kfree_rcu(p, rcu);
+	}
+	return err;
 }
@@ -1120,8 +1120,13 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
 		return;

 	if (rt->dst.dev) {
-		dev->needed_headroom = rt->dst.dev->hard_header_len +
-				       t_hlen;
+		unsigned short dst_len = rt->dst.dev->hard_header_len +
+					 t_hlen;
+
+		if (t->dev->header_ops)
+			dev->hard_header_len = dst_len;
+		else
+			dev->needed_headroom = dst_len;

 		if (set_mtu) {
 			dev->mtu = rt->dst.dev->mtu - t_hlen;
@@ -1146,7 +1151,12 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

 	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
-	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
+	if (tunnel->dev->header_ops)
+		tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	else
+		tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
 	return t_hlen;
 }
@@ -1785,7 +1785,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	}

 	/* Create the new socket */
-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
 	if (!nsk) {
 		err = pr_iucv->path_sever(path, user_data);
 		iucv_path_free(path);
@@ -1991,7 +1991,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
 		goto out;
 	}

-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
 	bh_lock_sock(sk);
 	if ((sk->sk_state != IUCV_LISTEN) ||
 	    sk_acceptq_is_full(sk) ||
@@ -196,6 +196,9 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	__be32 lse;
 	int err;

+	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
+		return -ENOMEM;
+
 	stack = mpls_hdr(skb);
 	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
 	err = skb_mpls_update_lse(skb, lse);
@@ -96,10 +96,19 @@ static void rose_loopback_timer(struct timer_list *unused)
 		}

 		if (frametype == ROSE_CALL_REQUEST) {
-			if ((dev = rose_dev_get(dest)) != NULL) {
-				if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
-					kfree_skb(skb);
-			} else {
+			if (!rose_loopback_neigh->dev) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			dev = rose_dev_get(dest);
+			if (!dev) {
 				kfree_skb(skb);
+				continue;
+			}
+
+			if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) {
+				dev_put(dev);
+				kfree_skb(skb);
 			}
 		} else {
@@ -88,6 +88,9 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
 			goto drop;
 		break;
 	case TCA_MPLS_ACT_MODIFY:
+		if (!pskb_may_pull(skb,
+				   skb_network_offset(skb) + MPLS_HLEN))
+			goto drop;
 		new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
 		if (skb_mpls_update_lse(skb, new_lse))
 			goto drop;
@@ -1163,6 +1163,8 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
 	if (tls_ctx->tx_conf != TLS_HW) {
 		dev_put(netdev);
 		tls_ctx->netdev = NULL;
+	} else {
+		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
 	}
 out:
 	up_read(&device_offload_lock);
@@ -1192,7 +1194,8 @@ static int tls_device_down(struct net_device *netdev)
 	if (ctx->tx_conf == TLS_HW)
 		netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 						TLS_OFFLOAD_CTX_DIR_TX);
-	if (ctx->rx_conf == TLS_HW)
+	if (ctx->rx_conf == TLS_HW &&
+	    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
 		netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 						TLS_OFFLOAD_CTX_DIR_RX);
 	WRITE_ONCE(ctx->netdev, NULL);
@@ -1291,6 +1291,12 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
 			return NULL;
 		}

+		if (!skb_queue_empty(&sk->sk_receive_queue)) {
+			__strp_unpause(&ctx->strp);
+			if (ctx->recv_pkt)
+				return ctx->recv_pkt;
+		}
+
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
 			return NULL;

@@ -675,7 +675,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	int len, i, rc = 0;

 	if (addr_len != sizeof(struct sockaddr_x25) ||
-	    addr->sx25_family != AF_X25) {
+	    addr->sx25_family != AF_X25 ||
+	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -769,7 +770,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,

 	rc = -EINVAL;
 	if (addr_len != sizeof(struct sockaddr_x25) ||
-	    addr->sx25_family != AF_X25)
+	    addr->sx25_family != AF_X25 ||
+	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
 		goto out;

 	rc = -ENETUNREACH;
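Both x25 hunks add the same strnlen() test: an x25_addr that is not NUL-terminated within its fixed X25_ADDR_LEN buffer is rejected up front, before any strcpy()-style consumer can run past the end. A hedged sketch of the check in isolation:

	#include <string.h>

	#define X25_ADDR_LEN 16	/* fixed-size buffer, as in struct x25_address */

	/* Returns 1 only if addr contains a terminator inside the buffer;
	 * strnlen() == X25_ADDR_LEN is exactly the overflow case rejected
	 * by the patch.
	 */
	static int x25_addr_ok(const char addr[X25_ADDR_LEN])
	{
		return strnlen(addr, X25_ADDR_LEN) < X25_ADDR_LEN;
	}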
@@ -30,7 +30,7 @@

 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
 		     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };

 /* digest size for IMA, fits SHA1 or MD5 */
 #define IMA_DIGEST_SIZE		SHA1_DIGEST_SIZE
@@ -682,7 +682,7 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
 	if (rc != 0)
 		return rc;

-	/* cumulative sha1 over tpm registers 0-7 */
+	/* cumulative digest over TPM registers 0-7 */
 	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
 		ima_pcrread(i, &d);
 		/* now accumulate with current aggregate */
@@ -691,6 +691,19 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
 		if (rc != 0)
 			return rc;
 	}
+	/*
+	 * Extend cumulative digest over TPM registers 8-9, which contain
+	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
+	 * in a typical PCR allocation. Registers 8-9 are only included in
+	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
+	 */
+	if (alg_id != TPM_ALG_SHA1) {
+		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+			ima_pcrread(i, &d);
+			rc = crypto_shash_update(shash, d.digest,
+						 crypto_shash_digestsize(tfm));
+		}
+	}
 	if (!rc)
 		crypto_shash_final(shash, digest);
 	return rc;
@@ -607,7 +607,7 @@ static int snd_us16x08_eq_put(struct snd_kcontrol *kcontrol,
 static int snd_us16x08_meter_info(struct snd_kcontrol *kcontrol,
 				  struct snd_ctl_elem_info *uinfo)
 {
-	uinfo->count = 1;
+	uinfo->count = 34;
 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
 	uinfo->value.integer.max = 0x7FFF;
 	uinfo->value.integer.min = 0;