This is the 5.4.137 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEE64UACgkQONu9yGCS
aT43BA/7BbeM1RL4UmHcsqTvk3m3nXyGCw/5v9c3JZflmfmfG1H/bbeeHpRs28jL
MCzZxVHakxH2MpQxxzPyy7ZD1uAFe2GFXNPoHtfVTyFRvrIQRKWygFCiqeOKnato
gRlzPklzO21b+YaiyV+53vG7q0K+kSz7/J2NY8jWSDNCDLOJjBMt0BsSMdq4VyRb
R2dsoHAw7ifDUPrMk41xoWdQrYweXV4ebWnKS88wrFicczz5WTNAWu9YnpePzFFn
lQCpgCy1rc/64zvJOyHw8Ou7V3dcWtYpVM0iAH1T4j7St7nyDokcZ1BzIxKSklTd
QZPncyLszTN/UGGwFgFw4qizGzsothQDmEdQOWtVZBPbfDqntbZJO+a9jkwdfB7H
E251/e1UaeyhzEshiYPCSdJEtT945ZDhJerQQZk1yMxUy1b8HobHL8P+Ce/uGypT
6yux9fKpWZJMFN0Su8G2exJcDXFgwiciGxD9oF7Iuo1++6gIrgfizSDLga8QPbub
x6/YcoWU32KZ289AyvhCQPsPSh8MQntNz5XiiTNcsS1+/7kcBVtVStH67O/tbPZz
lJc2G0lYeYe2SFQvJlmLruD690isKslEr5d3csieWco6+ey5h7YF6hLMLS1BjBOL
/Hq2AJj72qDFOh5Dq+zPo2oJhWm2j9Am6REE4btDhOyjLB6YJN8=
=8nQ8
-----END PGP SIGNATURE-----

Merge 5.4.137 into android11-5.4-lts

Changes in 5.4.137
	selftest: fix build error in tools/testing/selftests/vm/userfaultfd.c
	tools: Allow proper CC/CXX/... override with LLVM=1 in Makefile.include
	KVM: x86: determine if an exception has an error code only when injecting it.
	af_unix: fix garbage collect vs MSG_PEEK
	workqueue: fix UAF in pwq_unbound_release_workfn()
	cgroup1: fix leaked context root causing sporadic NULL deref in LTP
	net/802/mrp: fix memleak in mrp_request_join()
	net/802/garp: fix memleak in garp_request_join()
	net: annotate data race around sk_ll_usec
	sctp: move 198 addresses from unusable to private scope
	ipv6: allocate enough headroom in ip6_finish_output2()
	hfs: add missing clean-up in hfs_fill_super
	hfs: fix high memory mapping in hfs_bnode_read
	hfs: add lock nesting notation to hfs_find_init
	firmware: arm_scmi: Fix possible scmi_linux_errmap buffer overflow
	firmware: arm_scmi: Fix range check for the maximum number of pending messages
	cifs: fix the out of range assignment to bit fields in parse_server_interfaces
	iomap: remove the length variable in iomap_seek_data
	iomap: remove the length variable in iomap_seek_hole
	ARM: dts: versatile: Fix up interrupt controller node names
	ipv6: ip6_finish_output2: set sk into newly allocated nskb
	Linux 5.4.137

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I441d065c6fd79c96c67172137806f71dbcd41753
commit 758a7acf8b
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 136
+SUBLEVEL = 137
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

@@ -195,16 +195,15 @@
 		#size-cells = <1>;
 		ranges;
 
-		vic: intc@10140000 {
+		vic: interrupt-controller@10140000 {
 			compatible = "arm,versatile-vic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
 			reg = <0x10140000 0x1000>;
 			clear-mask = <0xffffffff>;
 			valid-mask = <0xffffffff>;
 		};
 
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			compatible = "arm,versatile-sic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
@@ -7,7 +7,7 @@
 
 	amba {
 		/* The Versatile PB is using more SIC IRQ lines than the AB */
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			clear-mask = <0xffffffff>;
 			/*
 			 * Valid interrupt lines mask according to

@@ -475,8 +475,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 
 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
 	queue:
-		if (has_error && !is_protmode(vcpu))
-			has_error = false;
 		if (reinject) {
 			/*
 			 * On vmentry, vcpu->arch.exception.pending is only
@@ -7592,6 +7590,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+		vcpu->arch.exception.error_code = false;
+	kvm_x86_ops->queue_exception(vcpu);
+}
+
 static int inject_pending_event(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -7599,7 +7604,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	/* try to reinject previous events if any */
 
 	if (vcpu->arch.exception.injected)
-		kvm_x86_ops->queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 	/*
 	 * Do not inject an NMI or interrupt if there is a pending
 	 * exception. Exceptions and interrupts are recognized at
@@ -7665,7 +7670,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 			}
 		}
 
-		kvm_x86_ops->queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 	}
 
 	/* Don't consider new event if we re-injected an event */

@@ -54,7 +54,6 @@ enum scmi_error_codes {
 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-	SCMI_ERR_MAX
 };
 
 /* List of all SCMI devices active in system */
@@ -176,8 +175,10 @@ static const int scmi_linux_errmap[] = {
 
 static inline int scmi_to_linux_errno(int errno)
 {
-	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
-		return scmi_linux_errmap[-errno];
+	int err_idx = -errno;
+
+	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+		return scmi_linux_errmap[err_idx];
 	return -EIO;
 }
 
@@ -693,8 +694,9 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
 	struct scmi_xfers_info *info = &sinfo->tx_minfo;
 
 	/* Pre-allocated messages, no more than what hdr.seq can support */
-	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
-		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+		dev_err(dev,
+			"Invalid maximum messages %d, not in range [1 - %lu]\n",
 			desc->max_msg, MSG_TOKEN_MAX);
 		return -EINVAL;
 	}

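Note on the arm_scmi hunks above: with SCMI_ERR_MAX gone, the lookup bounds itself against the table via ARRAY_SIZE, so an out-of-range firmware status can never index past scmi_linux_errmap. A minimal standalone sketch of the pattern (userspace C; the table values here are placeholders, not the kernel's):

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* Placeholder table: index 0 = success, 1..N = error mappings. */
    static const int errmap[] = { 0, -5, -22, -95, -92, -34, -13, -12, -5, -5, -71 };

    static int to_linux_errno(int scmi_err)
    {
        int err_idx = -scmi_err;

        /* Bounds-check against the table itself; a status below -10
         * (or a positive one) falls through instead of overflowing. */
        if (err_idx >= 0 && err_idx < (int)ARRAY_SIZE(errmap))
            return errmap[err_idx];
        return -5; /* treat anything unmappable as -EIO */
    }

    int main(void)
    {
        printf("%d\n", to_linux_errno(-64)); /* prints -5, no overflow */
        return 0;
    }
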
@@ -498,8 +498,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
 	p = buf;
 	while (bytes_left >= sizeof(*p)) {
 		info->speed = le64_to_cpu(p->LinkSpeed);
-		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
-		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
 
 		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
 		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);

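Context for the cifs hunk: rss_capable and rdma_capable are single-bit bitfields, so assigning a raw masked word silently truncates to the lowest bit. A standalone illustration of the pitfall (the 0x1/0x2 flag values are illustrative, not quoted from the cifs headers):

    #include <stdio.h>

    #define RSS_CAPABLE  0x00000001
    #define RDMA_CAPABLE 0x00000002

    struct iface {
        unsigned int rss_capable : 1;
        unsigned int rdma_capable : 1;
    };

    int main(void)
    {
        unsigned int capability = RDMA_CAPABLE; /* only bit 1 set */
        struct iface a = { 0 }, b = { 0 };

        /* Buggy: 0x2 truncated to a 1-bit field becomes 0. */
        a.rdma_capable = capability & RDMA_CAPABLE;

        /* Fixed: normalize to 0/1 before the assignment. */
        b.rdma_capable = (capability & RDMA_CAPABLE) ? 1 : 0;

        printf("buggy=%u fixed=%u\n", a.rdma_capable, b.rdma_capable); /* 0 1 */
        return 0;
    }
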
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE  256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
@@ -421,14 +421,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -444,6 +442,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:

@@ -52,7 +52,6 @@ extern void __init chrdev_init(void);
  */
 extern const struct fs_context_operations legacy_fs_context_ops;
 extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
 extern void vfs_clean_context(struct fs_context *fc);
 extern int finish_clean_context(struct fs_context *fc);
 

@@ -140,23 +140,20 @@ loff_t
 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_hole_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_hole_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
 			break;
-
 		offset += ret;
-		length -= ret;
 	}
 
 	return offset;
@@ -186,27 +183,23 @@ loff_t
 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_data_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_data_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
-			break;
-
+			return offset;
 		offset += ret;
-		length -= ret;
 	}
 
-	if (length <= 0)
-		return -ENXIO;
-	return offset;
+	/* We've reached the end of the file without finding data */
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(iomap_seek_data);

@@ -134,6 +134,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 extern int generic_parse_monolithic(struct fs_context *fc, void *data);
 extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
+extern void fc_drop_locked(struct fs_context *fc);
 
 /*
  * sget() wrappers to be called from the ->get_tree() op.

@@ -36,7 +36,7 @@ static inline bool net_busy_loop_on(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return sk->sk_ll_usec && !signal_pending(current);
+	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
 }
 
 bool sk_busy_loop_end(void *p, unsigned long start_time);

@@ -328,8 +328,7 @@ enum {
 #define SCTP_SCOPE_POLICY_MAX	SCTP_SCOPE_POLICY_LINK
 
 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
  * addresses.
  */
@@ -337,7 +336,6 @@ enum {
 	((htonl(INADDR_BROADCAST) == a) || \
 	 ipv4_is_multicast(a) || \
 	 ipv4_is_zeronet(a) || \
-	 ipv4_is_test_198(a) || \
 	 ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions. */

@@ -1231,9 +1231,7 @@ int cgroup1_get_tree(struct fs_context *fc)
 	ret = cgroup_do_get_tree(fc);
 
 	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
-		struct super_block *sb = fc->root->d_sb;
-		dput(fc->root);
-		deactivate_locked_super(sb);
+		fc_drop_locked(fc);
 		ret = 1;
 	}
 

@@ -3666,15 +3666,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 						  unbound_release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
-	bool is_last;
+	bool is_last = false;
 
-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-		return;
+	/*
+	 * when @pwq is not linked, it doesn't hold any reference to the
+	 * @wq, and @wq is invalid to access.
+	 */
+	if (!list_empty(&pwq->pwqs_node)) {
+		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+			return;
 
-	mutex_lock(&wq->mutex);
-	list_del_rcu(&pwq->pwqs_node);
-	is_last = list_empty(&wq->pwqs);
-	mutex_unlock(&wq->mutex);
+		mutex_lock(&wq->mutex);
+		list_del_rcu(&pwq->pwqs_node);
+		is_last = list_empty(&wq->pwqs);
+		mutex_unlock(&wq->mutex);
+	}
 
 	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);

@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
 	kfree(attr);
 }
 
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct garp_attr *attr;
+
+	for (node = rb_first(&app->gid);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct garp_attr, node);
+		garp_attr_destroy(app, attr);
+	}
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_attr_destroy_all(app);
 	garp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
 

@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
 	kfree(attr);
 }
 
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct mrp_attr *attr;
+
+	for (node = rb_first(&app->mad);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct mrp_attr, node);
+		mrp_attr_destroy(app, attr);
+	}
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_attr_destroy_all(app);
 	mrp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
 

@@ -1099,7 +1099,7 @@ set_rcvbuf:
 		if (val < 0)
 			ret = -EINVAL;
 		else
-			sk->sk_ll_usec = val;
+			WRITE_ONCE(sk->sk_ll_usec, val);
 	}
 	break;
 #endif

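The two sk_ll_usec hunks (the READ_ONCE in busy_poll.h above and this WRITE_ONCE) annotate an intentional lockless reader/writer pair: setsockopt stores the value while the busy-poll path reads it without a lock. A rough userspace sketch of the core idea (the kernel macros are more elaborate; this is a simplified rendition using GNU C typeof):

    /* Simplified accessors: force exactly one untorn load/store
     * through a volatile-qualified lvalue. */
    #define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

    struct sock_like { unsigned int sk_ll_usec; };

    /* Writer, e.g. the SO_BUSY_POLL setsockopt path. */
    static void set_busy_poll(struct sock_like *sk, unsigned int val)
    {
        WRITE_ONCE(sk->sk_ll_usec, val);
    }

    /* Lockless reader, e.g. sk_can_busy_loop(). */
    static int can_busy_loop(const struct sock_like *sk)
    {
        return READ_ONCE(sk->sk_ll_usec) != 0;
    }
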
@@ -59,10 +59,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	int delta = hh_len - skb_headroom(skb);
 	const struct in6_addr *nexthop;
 	struct neighbour *neigh;
 	int ret;
 
+	/* Be paranoid, rather than too clever. */
+	if (unlikely(delta > 0) && dev->header_ops) {
+		/* pskb_expand_head() might crash, if skb is shared */
+		if (skb_shared(skb)) {
+			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+			if (likely(nskb)) {
+				if (skb->sk)
+					skb_set_owner_w(nskb, skb->sk);
+				consume_skb(skb);
+			} else {
+				kfree_skb(skb);
+			}
+			skb = nskb;
+		}
+		if (skb &&
+		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			skb = NULL;
+		}
+		if (!skb) {
+			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+			return -ENOMEM;
+		}
+	}
+
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 

@@ -397,7 +397,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
 		retval = SCTP_SCOPE_LINK;
 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 		retval = SCTP_SCOPE_PRIVATE;
 	} else {
 		retval = SCTP_SCOPE_GLOBAL;

@@ -1512,6 +1512,53 @@ out:
 	return err;
 }
 
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+	/*
+	 * Garbage collection of unix sockets starts by selecting a set of
+	 * candidate sockets which have reference only from being in flight
+	 * (total_refs == inflight_refs).  This condition is checked once during
+	 * the candidate collection phase, and candidates are marked as such, so
+	 * that non-candidates can later be ignored.  While inflight_refs is
+	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
+	 * is an instantaneous decision.
+	 *
+	 * Once a candidate, however, the socket must not be reinstalled into a
+	 * file descriptor while the garbage collection is in progress.
+	 *
+	 * If the above conditions are met, then the directed graph of
+	 * candidates (*) does not change while unix_gc_lock is held.
+	 *
+	 * Any operations that changes the file count through file descriptors
+	 * (dup, close, sendmsg) does not change the graph since candidates are
+	 * not installed in fds.
+	 *
+	 * Dequeing a candidate via recvmsg would install it into an fd, but
+	 * that takes unix_gc_lock to decrement the inflight count, so it's
+	 * serialized with garbage collection.
+	 *
+	 * MSG_PEEK is special in that it does not change the inflight count,
+	 * yet does install the socket into an fd.  The following lock/unlock
+	 * pair is to ensure serialization with garbage collection.  It must be
+	 * done between incrementing the file count and installing the file into
+	 * an fd.
+	 *
+	 * If garbage collection starts after the barrier provided by the
+	 * lock/unlock, then it will see the elevated refcount and not mark this
+	 * as a candidate.  If a garbage collection is already in progress
+	 * before the file count was incremented, then the lock/unlock pair will
+	 * ensure that garbage collection is finished before progressing to
+	 * installing the fd.
+	 *
+	 * (*) A -> B where B is on the queue of A or B is on the queue of C
+	 * which is on the queue of listening socket A.
+	 */
+	spin_lock(&unix_gc_lock);
+	spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
 	int err = 0;
@@ -2137,7 +2184,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		sk_peek_offset_fwd(sk, size);
 
 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 	}
 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
@@ -2378,7 +2425,7 @@ unlock:
 		/* It is questionable, see note in unix_dgram_recvmsg.
 		 */
 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 
 		sk_peek_offset_fwd(sk, chunk);
 

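The empty spin_lock/spin_unlock pair in unix_peek_fds() above is a barrier idiom, not a critical section: the peeker cannot pass it while a collection cycle holds unix_gc_lock, and any cycle that starts later already sees the elevated file count. The shape of the idiom, abstracted into a small pthread sketch (illustrative only, not kernel code):

    #include <pthread.h>

    static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Collector: holds gc_lock for the whole candidate scan. */
    static void gc_cycle(void)
    {
        pthread_mutex_lock(&gc_lock);
        /* ... mark and sweep candidates based on refcounts ... */
        pthread_mutex_unlock(&gc_lock);
    }

    /* Peeker: called after bumping the refcount, before publishing
     * the reference.  The empty critical section is the point: it
     * cannot complete while gc_cycle() is mid-scan, and a scan that
     * starts afterwards observes the already-elevated refcount. */
    static void sync_with_gc(void)
    {
        pthread_mutex_lock(&gc_lock);
        pthread_mutex_unlock(&gc_lock);
    }
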
@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
 EXTRA_WARNINGS += -Wwrite-strings
 EXTRA_WARNINGS += -Wformat
 
-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
-
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
 # environment or command line. This is necessary for CC and AR
@@ -52,12 +50,22 @@ define allow-override
   $(eval $(1) = $(2)))
 endef
 
+ifneq ($(LLVM),)
+$(call allow-override,CC,clang)
+$(call allow-override,AR,llvm-ar)
+$(call allow-override,LD,ld.lld)
+$(call allow-override,CXX,clang++)
+$(call allow-override,STRIP,llvm-strip)
+else
 # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
 $(call allow-override,AR,$(CROSS_COMPILE)ar)
 $(call allow-override,LD,$(CROSS_COMPILE)ld)
 $(call allow-override,CXX,$(CROSS_COMPILE)g++)
 $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+endif
+
+CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
 ifneq ($(LLVM),)
 HOSTAR ?= llvm-ar

@@ -141,7 +141,7 @@ static void anon_allocate_area(void **alloc_area)
 {
 	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
 			   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-	if (*alloc_area == MAP_FAILED)
+	if (*alloc_area == MAP_FAILED) {
 		fprintf(stderr, "mmap of anonymous memory failed");
 		*alloc_area = NULL;
 	}