This is the 5.4.209 stable release
-----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmLqRxEACgkQONu9yGCS aT6POg/+JsjJHkU2o/U+/3wstemdaTBXH3o6uVrHiARosJ0nY2ZIxx+nvhs5d7G4 xKEEJDoczhYDVjUINEUFztmKwV4jlMvIkubHwk0Z+h0XeV6PuQZ+ZKvgfAHsO3tx LuRwLKXdTybMol5UHn1RKuq3iDFO5rR4A6QLJKtDum2P+B1TIzoIdBUE7vPEOtj0 CvFcjhL80X/l7ARQU5J1oJNWIBLXUY8fpCbR5SiqalJrZm0PMs1jAXWfo0L9Io+U mHNnLlH3+Vh6WeaayS2QkhvlTHaJe0CvvdgJfwWc9ypS9vkadbCeaJusBUmn5FpT mw73UG8+P6wzTTeIFb/Rrwhz649ZnXXRdExovVn1xpsh/RiztSjMybrqglZrv0QN wVnWuMHvwSajmTEsTaSM1sOqbNejYyjw+UgjBOrFW63ZAYonKXXc5CR6zSvSVwVT pPKKHVgKCwygeGRmEW8IVhU2dAZbVsm7nrclIVCUCd4B+YzUc9ZzN/XtJEjUIPB0 HWuAstkOiWjJbIa8ujYm6YKxUVcI3tbTTrVgnIME/o0112YqeuKyodjWG3wQBKrT cLGtRLsd7rJrgn8NkludKnikptQ02FfOlTDT45KS8XhG1JTV5+0a35bnmI2541tS OZoJRRq/XYyfakUGMG9NwaAIDpRwKHzrBGhDBvSnofq8StvEDjY= =SoT3 -----END PGP SIGNATURE----- Merge 5.4.209 into android11-5.4-lts Changes in 5.4.209 Bluetooth: L2CAP: Fix use-after-free caused by l2cap_chan_put ntfs: fix use-after-free in ntfs_ucsncmp() s390/archrandom: prevent CPACF trng invocations in interrupt context tcp: Fix data-races around sysctl_tcp_dsack. tcp: Fix a data-race around sysctl_tcp_app_win. tcp: Fix a data-race around sysctl_tcp_adv_win_scale. tcp: Fix a data-race around sysctl_tcp_frto. tcp: Fix a data-race around sysctl_tcp_nometrics_save. ice: check (DD | EOF) bits on Rx descriptor rather than (EOP | RS) ice: do not setup vlan for loopback VSI scsi: ufs: host: Hold reference returned by of_parse_phandle() tcp: Fix a data-race around sysctl_tcp_limit_output_bytes. tcp: Fix a data-race around sysctl_tcp_challenge_ack_limit. net: ping6: Fix memleak in ipv6_renew_options(). ipv6/addrconf: fix a null-ptr-deref bug for ip6_ptr igmp: Fix data-races around sysctl_igmp_qrv. net: sungem_phy: Add of_node_put() for reference returned by of_get_parent() tcp: Fix a data-race around sysctl_tcp_min_tso_segs. tcp: Fix a data-race around sysctl_tcp_min_rtt_wlen. tcp: Fix a data-race around sysctl_tcp_autocorking. tcp: Fix a data-race around sysctl_tcp_invalid_ratelimit. Documentation: fix sctp_wmem in ip-sysctl.rst tcp: Fix a data-race around sysctl_tcp_comp_sack_delay_ns. tcp: Fix a data-race around sysctl_tcp_comp_sack_nr. i40e: Fix interface init with MSI interrupts (no MSI-X) sctp: fix sleep in atomic context bug in timer handlers netfilter: nf_queue: do not allow packet truncation below transport header offset virtio-net: fix the race between refill work and close perf symbol: Correct address for bss symbols sfc: disable softirqs for ptp TX sctp: leave the err path free in sctp_stream_init to sctp_stream_free ARM: crypto: comment out gcc warning that breaks clang builds mt7601u: add USB device ID for some versions of XiaoDu WiFi Dongle. scsi: core: Fix race between handling STS_RESOURCE and completion Linux 5.4.209 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com> Change-Id: I47925760dda15390893286236095322cdfb31837
commit 60bba945eb
@@ -2297,7 +2297,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
     Default: 4K
 
 sctp_wmem - vector of 3 INTEGERs: min, default, max
-    Currently this tunable has no effect.
+    Only the first value ("min") is used, "default" and "max" are
+    ignored.
+
+    min: Minimum size of send buffer that can be used by SCTP sockets.
+    It is guaranteed to each SCTP socket (but not association) even
+    under moderate memory pressure.
+
+    Default: 4K
 
 addr_scope_policy - INTEGER
     Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
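Illustrative only, not part of this patch: the documentation fix above says that only the first ("min") value of sctp_wmem is honoured by the kernel. A minimal userspace C sketch that reads the current triple from procfs and labels the fields accordingly (path net.sctp.sctp_wmem; the program name and output format are made up for the example):

/* Read /proc/sys/net/sctp/sctp_wmem and show which values matter. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/net/sctp/sctp_wmem", "r");
    int min, def, max;

    if (!f) {
        perror("open sctp_wmem");
        return 1;
    }
    if (fscanf(f, "%d %d %d", &min, &def, &max) != 3) {
        fclose(f);
        fprintf(stderr, "unexpected sctp_wmem format\n");
        return 1;
    }
    fclose(f);
    printf("min=%d (used), default=%d (ignored), max=%d (ignored)\n",
           min, def, max);
    return 0;
}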
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 208
+SUBLEVEL = 209
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
  * While older versions of GCC do not generate incorrect code, they fail to
  * recognize the parallel nature of these functions, and emit plain ARM code,
  * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
+ *
+ * #warning This code requires at least version 4.6 of GCC
  */
-#warning This code requires at least version 4.6 of GCC
 #endif
 
 #pragma GCC diagnostic ignored "-Wunused-variable"
@@ -2,7 +2,7 @@
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017, 2020
+ * Copyright IBM Corp. 2017, 2022
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -14,6 +14,7 @@
 #ifdef CONFIG_ARCH_RANDOM
 
 #include <linux/static_key.h>
+#include <linux/preempt.h>
 #include <linux/atomic.h>
 #include <asm/cpacf.h>
 
@@ -32,7 +33,8 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-    if (static_branch_likely(&s390_arch_random_available)) {
+    if (static_branch_likely(&s390_arch_random_available) &&
+        in_task()) {
         cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
         atomic64_add(sizeof(*v), &s390_arch_random_counter);
         return true;
@@ -42,7 +44,8 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-    if (static_branch_likely(&s390_arch_random_available)) {
+    if (static_branch_likely(&s390_arch_random_available) &&
+        in_task()) {
         cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
         atomic64_add(sizeof(*v), &s390_arch_random_counter);
         return true;
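Illustrative only: the s390 hunks above gate the CPACF TRNG call on in_task(), so the expensive instruction is never issued from hard or soft interrupt context. A minimal self-contained sketch of that guard pattern, with hypothetical names (not the actual archrandom.h code):

/* Hypothetical sketch of the in_task() guard used above. */
#include <linux/preempt.h>
#include <linux/types.h>

static bool example_get_seed(unsigned long *v)
{
    if (!in_task())     /* refuse in hardirq/softirq context */
        return false;

    /* ...fill *v from the hardware TRNG here (placeholder below)... */
    *v = 0;
    return true;
}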
@@ -1821,11 +1821,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
          * non-zero req_queue_pairs says that user requested a new
          * queue count via ethtool's set_channels, so use this
          * value for queues distribution across traffic classes
+         * We need at least one queue pair for the interface
+         * to be usable as we see in else statement.
          */
         if (vsi->req_queue_pairs > 0)
             vsi->num_queue_pairs = vsi->req_queue_pairs;
         else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
             vsi->num_queue_pairs = pf->num_lan_msix;
+        else
+            vsi->num_queue_pairs = 1;
     }
 
     /* Number of queues per enabled TC */
@@ -619,7 +619,8 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
         rx_desc = ICE_RX_DESC(rx_ring, i);
 
         if (!(rx_desc->wb.status_error0 &
-            cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+            (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+             cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
             continue;
 
         rx_buf = &rx_ring->rx_buf[i];
@@ -3495,10 +3495,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
     if (vsi->netdev) {
         ice_set_rx_mode(vsi->netdev);
 
-        err = ice_vsi_vlan_setup(vsi);
+        if (vsi->type != ICE_VSI_LB) {
+            err = ice_vsi_vlan_setup(vsi);
 
-        if (err)
-            return err;
+            if (err)
+                return err;
+        }
     }
     ice_vsi_cfg_dcb_rings(vsi);
 
@@ -1093,7 +1093,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
 
     tx_queue = &ptp_data->channel->tx_queue[type];
     if (tx_queue && tx_queue->timestamping) {
+        /* This code invokes normal driver TX code which is always
+         * protected from softirqs when called from generic TX code,
+         * which in turn disables preemption. Look at __dev_queue_xmit
+         * which uses rcu_read_lock_bh disabling preemption for RCU
+         * plus disabling softirqs. We do not need RCU reader
+         * protection here.
+         *
+         * Although it is theoretically safe for current PTP TX/RX code
+         * running without disabling softirqs, there are three good
+         * reasond for doing so:
+         *
+         * 1) The code invoked is mainly implemented for non-PTP
+         *    packets and it is always executed with softirqs
+         *    disabled.
+         * 2) This being a single PTP packet, better to not
+         *    interrupt its processing by softirqs which can lead
+         *    to high latencies.
+         * 3) netdev_xmit_more checks preemption is disabled and
+         *    triggers a BUG_ON if not.
+         */
+        local_bh_disable();
         efx_enqueue_skb(tx_queue, skb);
+        local_bh_enable();
     } else {
         WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
         dev_kfree_skb_any(skb);
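Illustrative only: a minimal sketch of the softirq-disable pairing the sfc hunk applies around the TX call, with a hypothetical callback instead of the driver function:

/* Run a short critical section with softirqs disabled on this CPU,
 * mirroring the local_bh_disable()/local_bh_enable() pair added above. */
#include <linux/bottom_half.h>

static void example_xmit_with_bh_disabled(void (*xmit)(void *), void *arg)
{
    local_bh_disable();   /* softirqs (and preemption) off on this CPU */
    xmit(arg);            /* TX path that expects BH-disabled context */
    local_bh_enable();    /* re-enable; pending softirqs may now run */
}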
@@ -454,6 +454,7 @@ static int bcm5421_init(struct mii_phy* phy)
         int can_low_power = 1;
         if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
             can_low_power = 0;
+        of_node_put(np);
         if (can_low_power) {
             /* Enable automatic low-power */
             sungem_phy_write(phy, 0x1c, 0x9002);
@@ -213,9 +213,15 @@ struct virtnet_info {
     /* Packet virtio header size */
     u8 hdr_len;
 
-    /* Work struct for refilling if we run low on memory. */
+    /* Work struct for delayed refilling if we run low on memory. */
     struct delayed_work refill;
 
+    /* Is delayed refill enabled? */
+    bool refill_enabled;
+
+    /* The lock to synchronize the access to refill_enabled */
+    spinlock_t refill_lock;
+
     /* Work struct for config space updates */
     struct work_struct config_work;
 
@@ -319,6 +325,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
     return p;
 }
 
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+    spin_lock_bh(&vi->refill_lock);
+    vi->refill_enabled = true;
+    spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+    spin_lock_bh(&vi->refill_lock);
+    vi->refill_enabled = false;
+    spin_unlock_bh(&vi->refill_lock);
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
                                     struct virtqueue *vq)
 {
@@ -1388,8 +1408,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
     }
 
     if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-        if (!try_fill_recv(vi, rq, GFP_ATOMIC))
-            schedule_delayed_work(&vi->refill, 0);
+        if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+            spin_lock(&vi->refill_lock);
+            if (vi->refill_enabled)
+                schedule_delayed_work(&vi->refill, 0);
+            spin_unlock(&vi->refill_lock);
+        }
     }
 
     u64_stats_update_begin(&rq->stats.syncp);
@@ -1508,6 +1532,8 @@ static int virtnet_open(struct net_device *dev)
     struct virtnet_info *vi = netdev_priv(dev);
     int i, err;
 
+    enable_delayed_refill(vi);
+
     for (i = 0; i < vi->max_queue_pairs; i++) {
         if (i < vi->curr_queue_pairs)
             /* Make sure we have some buffers: if oom use wq. */
@@ -1878,6 +1904,8 @@ static int virtnet_close(struct net_device *dev)
     struct virtnet_info *vi = netdev_priv(dev);
     int i;
 
+    /* Make sure NAPI doesn't schedule refill work */
+    disable_delayed_refill(vi);
     /* Make sure refill_work doesn't re-enable napi! */
     cancel_delayed_work_sync(&vi->refill);
 
@@ -2417,6 +2445,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
     virtio_device_ready(vdev);
 
+    enable_delayed_refill(vi);
+
     if (netif_running(vi->dev)) {
         err = virtnet_open(vi->dev);
         if (err)
@@ -3143,6 +3173,7 @@ static int virtnet_probe(struct virtio_device *vdev)
     vdev->priv = vi;
 
     INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+    spin_lock_init(&vi->refill_lock);
 
     /* If we can receive ANY GSO packets, we must allocate large ones. */
     if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
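Illustrative only: a minimal self-contained sketch (hypothetical driver, not virtio-net) of the pattern the hunks above introduce. A spinlock-protected "enabled" flag gates scheduling of the delayed work, and the close path clears the flag before cancelling the work, so nothing can requeue it afterwards:

/* Hypothetical sketch of the gated delayed-refill pattern. */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_dev {
    spinlock_t lock;            /* protects 'enabled' */
    bool enabled;               /* may delayed work be (re)scheduled? */
    struct delayed_work refill;
};

static void example_refill_work(struct work_struct *work)
{
    /* refill buffers here */
}

static void example_init(struct example_dev *d)
{
    spin_lock_init(&d->lock);
    INIT_DELAYED_WORK(&d->refill, example_refill_work);
}

static void example_try_schedule(struct example_dev *d)
{
    spin_lock_bh(&d->lock);
    if (d->enabled)             /* only queue while the device is open */
        schedule_delayed_work(&d->refill, 0);
    spin_unlock_bh(&d->lock);
}

static void example_close(struct example_dev *d)
{
    spin_lock_bh(&d->lock);
    d->enabled = false;         /* stop new work from being queued... */
    spin_unlock_bh(&d->lock);
    cancel_delayed_work_sync(&d->refill);  /* ...then flush what is pending */
}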
@@ -26,6 +26,7 @@ static const struct usb_device_id mt7601u_device_table[] = {
     { USB_DEVICE(0x2717, 0x4106) },
     { USB_DEVICE(0x2955, 0x0001) },
     { USB_DEVICE(0x2955, 0x1001) },
+    { USB_DEVICE(0x2955, 0x1003) },
     { USB_DEVICE(0x2a5f, 0x1000) },
     { USB_DEVICE(0x7392, 0x7710) },
     { 0, }
@@ -1719,8 +1719,7 @@ out_put_budget:
     case BLK_STS_OK:
         break;
     case BLK_STS_RESOURCE:
-        if (atomic_read(&sdev->device_busy) ||
-            scsi_device_blocked(sdev))
+        if (scsi_device_blocked(sdev))
             ret = BLK_STS_DEV_RESOURCE;
         break;
     default:
@@ -125,9 +125,20 @@ out:
     return ret;
 }
 
+static bool phandle_exists(const struct device_node *np,
+                           const char *phandle_name, int index)
+{
+    struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
+
+    if (parse_np)
+        of_node_put(parse_np);
+
+    return parse_np != NULL;
+}
+
 #define MAX_PROP_SIZE 32
 static int ufshcd_populate_vreg(struct device *dev, const char *name,
-        struct ufs_vreg **out_vreg)
+                                struct ufs_vreg **out_vreg)
 {
     int ret = 0;
     char prop_name[MAX_PROP_SIZE];
@@ -140,7 +151,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
     }
 
     snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
-    if (!of_parse_phandle(np, prop_name, 0)) {
+    if (!phandle_exists(np, prop_name, 0)) {
         dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
             __func__, prop_name);
         goto out;
@@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
         a = (ATTR_RECORD*)((u8*)ctx->attr +
                 le32_to_cpu(ctx->attr->length));
     for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
-        if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
-                le32_to_cpu(ctx->mrec->bytes_allocated))
+        u8 *mrec_end = (u8 *)ctx->mrec +
+                       le32_to_cpu(ctx->mrec->bytes_allocated);
+        u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+                       a->name_length * sizeof(ntfschar);
+        if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
+            name_end > mrec_end)
             break;
         ctx->attr = a;
         if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
@@ -411,6 +411,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
 {
     const struct inet6_dev *idev = __in6_dev_get(dev);
 
+    if (unlikely(!idev))
+        return true;
+
     return !!idev->cnf.ignore_routes_with_linkdown;
 }
 
@@ -802,6 +802,7 @@ enum {
 };
 
 void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
 void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
@@ -1383,7 +1383,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
 
 static inline int tcp_win_from_space(const struct sock *sk, int space)
 {
-    int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+    int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
 
     return tcp_adv_win_scale <= 0 ?
         (space>>(-tcp_adv_win_scale)) :
@@ -110,7 +110,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 }
 
 /* Find channel with given SCID.
- * Returns locked channel. */
+ * Returns a reference locked channel.
+ */
 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                                                  u16 cid)
 {
@@ -118,15 +119,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 
     mutex_lock(&conn->chan_lock);
     c = __l2cap_get_chan_by_scid(conn, cid);
-    if (c)
-        l2cap_chan_lock(c);
+    if (c) {
+        /* Only lock if chan reference is not 0 */
+        c = l2cap_chan_hold_unless_zero(c);
+        if (c)
+            l2cap_chan_lock(c);
+    }
     mutex_unlock(&conn->chan_lock);
 
     return c;
 }
 
 /* Find channel with given DCID.
- * Returns locked channel.
+ * Returns a reference locked channel.
  */
 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                                                  u16 cid)
@@ -135,8 +140,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
 
     mutex_lock(&conn->chan_lock);
     c = __l2cap_get_chan_by_dcid(conn, cid);
-    if (c)
-        l2cap_chan_lock(c);
+    if (c) {
+        /* Only lock if chan reference is not 0 */
+        c = l2cap_chan_hold_unless_zero(c);
+        if (c)
+            l2cap_chan_lock(c);
+    }
     mutex_unlock(&conn->chan_lock);
 
     return c;
@@ -161,8 +170,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
 
     mutex_lock(&conn->chan_lock);
     c = __l2cap_get_chan_by_ident(conn, ident);
-    if (c)
-        l2cap_chan_lock(c);
+    if (c) {
+        /* Only lock if chan reference is not 0 */
+        c = l2cap_chan_hold_unless_zero(c);
+        if (c)
+            l2cap_chan_lock(c);
+    }
     mutex_unlock(&conn->chan_lock);
 
     return c;
@@ -496,6 +509,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
     kref_get(&c->kref);
 }
 
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
+{
+    BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
+
+    if (!kref_get_unless_zero(&c->kref))
+        return NULL;
+
+    return c;
+}
+
 void l2cap_chan_put(struct l2cap_chan *c)
 {
     BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
@@ -1812,7 +1835,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
         src_match = !bacmp(&c->src, src);
         dst_match = !bacmp(&c->dst, dst);
         if (src_match && dst_match) {
-            l2cap_chan_hold(c);
+            c = l2cap_chan_hold_unless_zero(c);
+            if (!c)
+                continue;
+
             read_unlock(&chan_list_lock);
             return c;
         }
@@ -1827,7 +1853,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
     }
 
     if (c1)
-        l2cap_chan_hold(c1);
+        c1 = l2cap_chan_hold_unless_zero(c1);
 
     read_unlock(&chan_list_lock);
 
@@ -4221,6 +4247,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
 
 unlock:
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
     return err;
 }
 
@@ -4334,6 +4361,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
 
 done:
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
     return err;
 }
 
@@ -5062,6 +5090,7 @@ send_move_response:
     l2cap_send_move_chan_rsp(chan, result);
 
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
 
     return 0;
 }
@@ -5154,6 +5183,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
     }
 
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
 }
 
 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
@@ -5183,6 +5213,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
     l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
 
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
 }
 
 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
@@ -5246,6 +5277,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
     l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
 
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
 
     return 0;
 }
@@ -5281,6 +5313,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
     }
 
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
 
     return 0;
 }
@@ -5653,12 +5686,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
     if (credits > max_credits) {
         BT_ERR("LE credits overflow");
         l2cap_send_disconn_req(chan, ECONNRESET);
-        l2cap_chan_unlock(chan);
 
         /* Return 0 so that we don't trigger an unnecessary
          * command reject packet.
          */
-        return 0;
+        goto unlock;
     }
 
     chan->tx_credits += credits;
@@ -5669,7 +5701,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
     if (chan->tx_credits)
         chan->ops->resume(chan);
 
+unlock:
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
 
     return 0;
 }
@@ -6983,6 +7017,7 @@ drop:
 
 done:
     l2cap_chan_unlock(chan);
+    l2cap_chan_put(chan);
 }
 
 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
@@ -7386,7 +7421,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
         if (src_type != c->src_type)
             continue;
 
-        l2cap_chan_hold(c);
+        c = l2cap_chan_hold_unless_zero(c);
         read_unlock(&chan_list_lock);
         return c;
     }
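Illustrative only: a minimal sketch (hypothetical object, not l2cap) of the lookup pattern these hunks switch to. A reference is taken only if the refcount is still non-zero, so an object that is concurrently being freed can never be revived and then used after free:

/* Hypothetical sketch of the "hold unless zero" lookup pattern. */
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
    struct kref kref;
    /* ...payload... */
};

static void example_release(struct kref *kref)
{
    kfree(container_of(kref, struct example_obj, kref));
}

/* Return the object with an extra reference, or NULL if its refcount
 * has already dropped to zero (it is on its way to being freed). */
static struct example_obj *example_hold_unless_zero(struct example_obj *obj)
{
    if (!obj || !kref_get_unless_zero(&obj->kref))
        return NULL;
    return obj;
}

static void example_put(struct example_obj *obj)
{
    kref_put(&obj->kref, example_release);
}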
@@ -822,7 +822,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
     struct net *net = dev_net(in_dev->dev);
     if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
         return;
-    in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+    in_dev->mr_ifc_count = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
     igmp_ifc_start_timer(in_dev, 1);
 }
 
@@ -1004,7 +1004,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
          * received value was zero, use the default or statically
          * configured value.
          */
-        in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
+        in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
         in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
 
         /* RFC3376, 8.3. Query Response Interval:
@@ -1184,7 +1184,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
     pmc->interface = im->interface;
     in_dev_hold(in_dev);
     pmc->multiaddr = im->multiaddr;
-    pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+    pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
     pmc->sfmode = im->sfmode;
     if (pmc->sfmode == MCAST_INCLUDE) {
         struct ip_sf_list *psf;
@@ -1235,9 +1235,11 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
             swap(im->tomb, pmc->tomb);
             swap(im->sources, pmc->sources);
             for (psf = im->sources; psf; psf = psf->sf_next)
-                psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+                psf->sf_crcount = in_dev->mr_qrv ?:
+                    READ_ONCE(net->ipv4.sysctl_igmp_qrv);
         } else {
-            im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+            im->crcount = in_dev->mr_qrv ?:
+                READ_ONCE(net->ipv4.sysctl_igmp_qrv);
         }
         in_dev_put(pmc->interface);
         kfree_pmc(pmc);
@@ -1344,7 +1346,7 @@ static void igmp_group_added(struct ip_mc_list *im)
     if (in_dev->dead)
         return;
 
-    im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
+    im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
     if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
         spin_lock_bh(&im->lock);
         igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1358,7 +1360,7 @@ static void igmp_group_added(struct ip_mc_list *im)
      * IN() to IN(A).
      */
     if (im->sfmode == MCAST_EXCLUDE)
-        im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+        im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 
     igmp_ifc_event(in_dev);
 #endif
@@ -1749,7 +1751,7 @@ static void ip_mc_reset(struct in_device *in_dev)
 
     in_dev->mr_qi = IGMP_QUERY_INTERVAL;
     in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
-    in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
+    in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 }
 #else
 static void ip_mc_reset(struct in_device *in_dev)
@@ -1883,7 +1885,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
 #ifdef CONFIG_IP_MULTICAST
         if (psf->sf_oldin &&
             !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
-            psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+            psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
             psf->sf_next = pmc->tomb;
             pmc->tomb = psf;
             rv = 1;
@@ -1947,7 +1949,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
         /* filter mode change */
         pmc->sfmode = MCAST_INCLUDE;
 #ifdef CONFIG_IP_MULTICAST
-        pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+        pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
         in_dev->mr_ifc_count = pmc->crcount;
         for (psf = pmc->sources; psf; psf = psf->sf_next)
             psf->sf_crcount = 0;
@@ -2126,7 +2128,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 #ifdef CONFIG_IP_MULTICAST
         /* else no filters; keep old mode for reports */
 
-        pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+        pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
         in_dev->mr_ifc_count = pmc->crcount;
         for (psf = pmc->sources; psf; psf = psf->sf_next)
             psf->sf_crcount = 0;
@@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
                                 int size_goal)
 {
     return skb->len < size_goal &&
-           sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
            !tcp_rtx_queue_empty(sk) &&
            refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
@@ -440,7 +440,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
  */
 void tcp_init_buffer_space(struct sock *sk)
 {
-    int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
+    int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
     struct tcp_sock *tp = tcp_sk(sk);
     int maxwin;
 
@@ -2031,7 +2031,7 @@ void tcp_enter_loss(struct sock *sk)
      * loss recovery is underway except recurring timeout(s) on
      * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
      */
-    tp->frto = net->ipv4.sysctl_tcp_frto &&
+    tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
                (new_recovery || icsk->icsk_retransmits) &&
                !inet_csk(sk)->icsk_mtup.probe_size;
 }
@@ -2915,7 +2915,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
 {
-    u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
+    u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
     struct tcp_sock *tp = tcp_sk(sk);
 
     if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
@@ -3437,7 +3437,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
     if (*last_oow_ack_time) {
         s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
 
-        if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
+        if (0 <= elapsed &&
+            elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
             NET_INC_STATS(net, mib_idx);
             return true; /* rate-limited: don't send yet! */
         }
@@ -3485,7 +3486,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
     /* Then check host-wide RFC 5961 rate limit. */
     now = jiffies / HZ;
     if (now != challenge_timestamp) {
-        u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
+        u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
         u32 half = (ack_limit + 1) >> 1;
 
         challenge_timestamp = now;
@@ -4261,7 +4262,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
     struct tcp_sock *tp = tcp_sk(sk);
 
-    if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+    if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
         int mib_idx;
 
         if (before(seq, tp->rcv_nxt))
@@ -4307,7 +4308,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
         NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
         tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
-        if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+        if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
             u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
             tcp_rcv_spurious_retrans(sk, skb);
@@ -5304,7 +5305,7 @@ send_now:
     }
 
     if (!tcp_is_sack(tp) ||
-        tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
+        tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
         goto send_now;
 
     if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
@@ -5327,7 +5328,8 @@ send_now:
     if (tp->srtt_us && tp->srtt_us < rtt)
         rtt = tp->srtt_us;
 
-    delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
+    delay = min_t(unsigned long,
+                  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
                   rtt * (NSEC_PER_USEC >> 3)/20);
     sock_hold(sk);
     hrtimer_start(&tp->compressed_ack_timer, ns_to_ktime(delay),
@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
     int m;
 
     sk_dst_confirm(sk);
-    if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
+    if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
         return;
 
     rcu_read_lock();
@@ -1761,7 +1761,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 
     min_tso = ca_ops->min_tso_segs ?
             ca_ops->min_tso_segs(sk) :
-            sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+            READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
 
     tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
     return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2276,7 +2276,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
                   sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
     if (sk->sk_pacing_status == SK_PACING_NONE)
         limit = min_t(unsigned long, limit,
-                      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+                      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
     limit <<= factor;
 
     if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
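Illustrative only: a minimal sketch (hypothetical field names) of the annotation pattern used throughout the TCP data-race hunks above. A sysctl that can be updated at any time without the reader holding a lock is read with READ_ONCE() and, on the writer side, stored with WRITE_ONCE(), so the compiler cannot tear, fuse, or re-load the access:

/* Hypothetical sketch of the READ_ONCE()/WRITE_ONCE() pairing. */
#include <linux/compiler.h>

struct example_cfg {
    int knob;   /* updated from /proc at any time, read locklessly */
};

static int example_fast_path(struct example_cfg *cfg)
{
    /* One marked load: no tearing, no refetch within this function. */
    int knob = READ_ONCE(cfg->knob);

    return knob > 0 ? knob : 1;
}

static void example_update(struct example_cfg *cfg, int val)
{
    WRITE_ONCE(cfg->knob, val); /* paired marked store on the writer side */
}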
@@ -22,6 +22,11 @@
 #include <linux/proc_fs.h>
 #include <net/ping.h>
 
+static void ping_v6_destroy(struct sock *sk)
+{
+    inet6_destroy_sock(sk);
+}
+
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                                  int *addr_len)
@@ -165,6 +170,7 @@ struct proto pingv6_prot = {
     .owner =        THIS_MODULE,
     .init =         ping_init_sock,
     .close =        ping_close,
+    .destroy =      ping_v6_destroy,
     .connect =      ip6_datagram_connect_v6_only,
     .disconnect =   __udp_disconnect,
     .setsockopt =   ipv6_setsockopt,
@@ -846,11 +846,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 }
 
 static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
+nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
 {
     struct sk_buff *nskb;
 
     if (diff < 0) {
+        unsigned int min_len = skb_transport_offset(e->skb);
+
+        if (data_len < min_len)
+            return -EINVAL;
+
         if (pskb_trim(e->skb, data_len))
             return -ENOMEM;
     } else if (diff > 0) {
@@ -224,9 +224,8 @@ static struct sctp_association *sctp_association_init(
     if (!sctp_ulpq_init(&asoc->ulpq, asoc))
         goto fail_init;
 
-    if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
-                         0, gfp))
-        goto fail_init;
+    if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+        goto stream_free;
 
     /* Initialize default path MTU. */
     asoc->pathmtu = sp->pathmtu;
@@ -137,7 +137,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 
     ret = sctp_stream_alloc_out(stream, outcnt, gfp);
     if (ret)
-        goto out_err;
+        return ret;
 
     for (i = 0; i < stream->outcnt; i++)
         SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
@@ -145,22 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 handle_in:
     sctp_stream_interleave_init(stream);
     if (!incnt)
-        goto out;
-
-    ret = sctp_stream_alloc_in(stream, incnt, gfp);
-    if (ret)
-        goto in_err;
-
-    goto out;
-
-in_err:
-    sched->free(stream);
-    genradix_free(&stream->in);
-out_err:
-    genradix_free(&stream->out);
-    stream->outcnt = 0;
-out:
-    return ret;
+        return 0;
+
+    return sctp_stream_alloc_in(stream, incnt, gfp);
 }
 
 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
@@ -163,7 +163,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
         if (!SCTP_SO(&asoc->stream, i)->ext)
             continue;
 
-        ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
+        ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
         if (ret)
             goto err;
     }
@@ -230,6 +230,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
     return NULL;
 }
 
+static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
+{
+    size_t i, phdrnum;
+    u64 sz;
+
+    if (elf_getphdrnum(elf, &phdrnum))
+        return -1;
+
+    for (i = 0; i < phdrnum; i++) {
+        if (gelf_getphdr(elf, i, phdr) == NULL)
+            return -1;
+
+        if (phdr->p_type != PT_LOAD)
+            continue;
+
+        sz = max(phdr->p_memsz, phdr->p_filesz);
+        if (!sz)
+            continue;
+
+        if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
+            return 0;
+    }
+
+    /* Not found any valid program header */
+    return -1;
+}
+
 static bool want_demangle(bool is_kernel_sym)
 {
     return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
@@ -1091,6 +1118,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                     sym.st_value);
             used_opd = true;
         }
+
         /*
          * When loading symbols in a data mapping, ABS symbols (which
          * has a value of SHN_ABS in its st_shndx) failed at
@@ -1127,11 +1155,20 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
             goto out_elf_end;
         } else if ((used_opd && runtime_ss->adjust_symbols) ||
                    (!used_opd && syms_ss->adjust_symbols)) {
+            GElf_Phdr phdr;
+
+            if (elf_read_program_header(syms_ss->elf,
+                                        (u64)sym.st_value, &phdr)) {
+                pr_warning("%s: failed to find program header for "
+                           "symbol: %s st_value: %#" PRIx64 "\n",
+                           __func__, elf_name, (u64)sym.st_value);
+                continue;
+            }
             pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
-                      "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
-                      (u64)sym.st_value, (u64)shdr.sh_addr,
-                      (u64)shdr.sh_offset);
-            sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+                      "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
+                      __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
+                      (u64)phdr.p_offset);
+            sym.st_value -= phdr.p_vaddr - phdr.p_offset;
         }
 
         demangled = demangle_sym(dso, kmodule, elf_name);