This is the 5.4.145 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmE9pNwACgkQONu9yGCS
aT5g2Q//ZEiIQBvw6uZEA3z1Y6tuKFyIxxwOu24EbCTvB1oXXQX/XXQQPWori1Ny
OpjuQwXJr2LW+/wEKvUEj8mTrpFD+LsZmXRLBHCw9EqqD5RURDqUZt+xh+4xtV/S
2EgF0nCO9+84wo628Lc1C2LBJZZEo/kD7LnGeln+BXwRS1FQvGfD+5KIpOR2YzqI
hrCtVfO5ZQpv59PrAQkfwnfITk9BM1cwA7LCD75WEN59405ZV3mzFyTdvz8s0iGt
akxmwajLNGjQ/ro567tjpsWiK7EF26mNRTMZqu1jK6h/KjU9sQ4DzCqB+p5TPh/9
mj/Rzq1lSjLodsR0OznKBqFIVaqXyTU+0cMItjos9MBsG/4GOj8ixbXdFRG99WmK
bNsYucotSrE9ApYwsmqYaNcHcGeLUIsYCDFCQp3++oeF59+FA7Pp7B4bI/zcYRwY
aqbfTkMzo8/e4OF0B2LCx+8r0xol1SoLwBfcP3hb7rlKp9OkSYsKrJ/29CUuINe1
YC5HdrPf2HP36jlVCll5rQa+ERaxtNSCozgwxHG/x2yeOmiVqxdE+vUUmyRidah8
DvYklCM7upUDi1ujbOwbor9R1jQSXkWMFK76EBB3GJPgguFNyczFXm8xBzfRLQvw
H6YjIfnxNt+DLPn5uXIEhU7ISTkUno9i1BEd2NoeT1UiYTlk2bw=
=/lic
-----END PGP SIGNATURE-----

Merge 5.4.145 into android11-5.4-lts

Changes in 5.4.145
	ext4: fix race writing to an inline_data file while its xattrs are changing
	fscrypt: add fscrypt_symlink_getattr() for computing st_size
	ext4: report correct st_size for encrypted symlinks
	f2fs: report correct st_size for encrypted symlinks
	ubifs: report correct st_size for encrypted symlinks
	kthread: Fix PF_KTHREAD vs to_kthread() race
	xtensa: fix kconfig unmet dependency warning for HAVE_FUTEX_CMPXCHG
	gpu: ipu-v3: Fix i.MX IPU-v3 offset calculations for (semi)planar U/V formats
	reset: reset-zynqmp: Fixed the argument data type
	qed: Fix the VF msix vectors flow
	net: macb: Add a NULL check on desc_ptp
	qede: Fix memset corruption
	perf/x86/intel/pt: Fix mask of num_address_ranges
	perf/x86/amd/ibs: Work around erratum #1197
	perf/x86/amd/power: Assign pmu.module
	cryptoloop: add a deprecation warning
	ARM: 8918/2: only build return_address() if needed
	ALSA: hda/realtek: Workaround for conflicting SSID on ASUS ROG Strix G17
	ALSA: pcm: fix divide error in snd_pcm_lib_ioctl
	ARC: wireup clone3 syscall
	media: stkwebcam: fix memory leak in stk_camera_probe
	igmp: Add ip_mc_list lock in ip_check_mc_rcu
	USB: serial: mos7720: improve OOM-handling in read_mos_reg()
	ipv4/icmp: l3mdev: Perform icmp error route lookup on source device routing table (v2)
	powerpc/boot: Delete unneeded .globl _zimage_start
	net: ll_temac: Remove left-over debug message
	mm/page_alloc: speed up the iteration of max_order
	Revert "r8169: avoid link-up interrupt issue on RTL8106e if user enables ASPM"
	x86/events/amd/iommu: Fix invalid Perf result due to IOMMU PMC power-gating
	Revert "btrfs: compression: don't try to compress if we don't have enough pages"
	ALSA: usb-audio: Add registration quirk for JBL Quantum 800
	usb: host: xhci-rcar: Don't reload firmware after the completion
	usb: mtu3: use @mult for HS isoc or intr
	usb: mtu3: fix the wrong HS mult value
	xhci: fix unsafe memory usage in xhci tracing
	x86/reboot: Limit Dell Optiplex 990 quirk to early BIOS versions
	PCI: Call Max Payload Size-related fixup quirks early
	Linux 5.4.145

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic5a02a653d255a9bf1729e728011425e45e293aa
This commit is contained in: commit 9ccfa71ab0
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 144
SUBLEVEL = 145
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@@ -29,6 +29,7 @@ config ARC
	select GENERIC_SMP_IDLE_THREAD
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_TRACEHOOK
	select HAVE_COPY_THREAD_TLS
	select HAVE_DEBUG_STACKOVERFLOW
	select HAVE_FUTEX_CMPXCHG if FUTEX
	select HAVE_IOREMAP_PROT
@@ -11,6 +11,7 @@
#include <linux/types.h>

int sys_clone_wrapper(int, int, int, int, int);
int sys_clone3_wrapper(void *, size_t);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
@@ -21,6 +21,7 @@
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_TIME32_SYSCALLS
@@ -35,6 +35,18 @@ ENTRY(sys_clone_wrapper)
	b .Lret_from_system_call
END(sys_clone_wrapper)

ENTRY(sys_clone3_wrapper)
	SAVE_CALLEE_SAVED_USER
	bl @sys_clone3
	DISCARD_CALLEE_SAVED_USER

	GET_CURR_THR_INFO_FLAGS r10
	btst r10, TIF_SYSCALL_TRACE
	bnz tracesys_exit

	b .Lret_from_system_call
END(sys_clone3_wrapper)

ENTRY(ret_from_fork)
	; when the forked child comes here from the __switch_to function
	; r0 has the last task pointer.
@@ -171,9 +171,8 @@ asmlinkage void ret_from_fork(void);
 * |    user_r25    |
 * ------------------  <===== END of PAGE
 */
int copy_thread(unsigned long clone_flags,
	unsigned long usp, unsigned long kthread_arg,
	struct task_struct *p)
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *c_regs; /* child's pt_regs */
	unsigned long *childksp; /* to unwind out of __switch_to() */
@@ -231,7 +230,7 @@ int copy_thread(unsigned long clone_flags,
		 * set task's userland tls data ptr from 4th arg
		 * clone C-lib call is difft from clone sys-call
		 */
		task_thread_info(p)->thr_ptr = regs->r3;
		task_thread_info(p)->thr_ptr = tls;
	} else {
		/* Normal fork case: set parent's TLS ptr in child */
		task_thread_info(p)->thr_ptr =
@@ -7,6 +7,7 @@
#include <asm/syscalls.h>

#define sys_clone	sys_clone_wrapper
#define sys_clone3	sys_clone3_wrapper

#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
@@ -17,10 +17,14 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.

obj-y := elf.o entry-common.o irq.o opcodes.o \
	process.o ptrace.o reboot.o return_address.o \
	process.o ptrace.o reboot.o \
	setup.o signal.o sigreturn_codes.o \
	stacktrace.o sys_arm.o time.o traps.o

ifneq ($(CONFIG_ARM_UNWIND),y)
obj-$(CONFIG_FRAME_POINTER) += return_address.o
endif

obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
@@ -7,8 +7,6 @@
 */
#include <linux/export.h>
#include <linux/ftrace.h>

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>

#include <asm/stacktrace.h>
@@ -53,6 +51,4 @@ void *return_address(unsigned int level)
	return NULL;
}

#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */

EXPORT_SYMBOL_GPL(return_address);
@@ -44,9 +44,6 @@ p_end:		.long	_end
p_pstack:	.long	_platform_stack_top
#endif

	.globl	_zimage_start
	/* Clang appears to require the .weak directive to be after the symbol
	 * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
	.weak	_zimage_start
_zimage_start:
	.globl	_zimage_start_lib
@@ -90,6 +90,7 @@ struct perf_ibs {
	unsigned long offset_mask[1];
	int offset_max;
	unsigned int fetch_count_reset_broken : 1;
	unsigned int fetch_ignore_if_zero_rip : 1;
	struct cpu_perf_ibs __percpu *pcpu;

	struct attribute **format_attrs;
@@ -663,6 +664,10 @@ fail:
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		/* Workaround for erratum #1197 */
		if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
			goto out;

		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}
@@ -756,6 +761,9 @@ static __init void perf_event_ibs_init(void)
	if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
		perf_ibs_fetch.fetch_count_reset_broken = 1;

	if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
		perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
@@ -18,8 +18,6 @@
#include "../perf_event.h"
#include "iommu.h"

#define COUNTER_SHIFT 16

/* iommu pmu conf masks */
#define GET_CSOURCE(x) ((x)->conf & 0xFFULL)
#define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL)
@@ -285,22 +283,31 @@ static void perf_iommu_start(struct perf_event *event, int flags)
	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/*
	 * To account for power-gating, which prevents write to
	 * the counter, we need to enable the counter
	 * before setting up counter register.
	 */
	perf_iommu_enable_event(event);

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);
		u64 count = 0;
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		/*
		 * Since the IOMMU PMU only support counting mode,
		 * the counter always start with value zero.
		 */
		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
				     IOMMU_PC_COUNTER_REG, &count);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);

}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count, prev, delta;
	u64 count;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

@@ -311,14 +318,11 @@ static void perf_iommu_read(struct perf_event *event)
	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	prev = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
		return;

	/* Handle 48-bit counter overflow */
	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
	/*
	 * Since the counter always start with value zero,
	 * simply just accumulate the count for the event.
	 */
	local64_add(count, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
@@ -328,15 +332,16 @@ static void perf_iommu_stop(struct perf_event *event, int flags)
	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * To account for power-gating, in which reading the counter would
	 * return zero, we need to read the register before disabling.
	 */
	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
@@ -217,6 +217,7 @@ static struct pmu pmu_class = {
	.stop		= pmu_event_stop,
	.read		= pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static int power_cpu_exit(unsigned int cpu)
@@ -62,7 +62,7 @@ static struct pt_cap_desc {
	PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
	PT_CAP(output_subsys, 0, CPUID_ECX, BIT(3)),
	PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
	PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3),
	PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x7),
	PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff),
	PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
@@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
	},
	{	/* Handle problems with rebooting on the OptiPlex 990. */
		.callback = set_pci_reboot,
		.ident = "Dell OptiPlex 990",
		.ident = "Dell OptiPlex 990 BIOS A0x",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
			DMI_MATCH(DMI_BIOS_VERSION, "A0"),
		},
	},
	{	/* Handle problems with rebooting on Dell 300's */
@@ -27,7 +27,7 @@ config XTENSA
	select HAVE_DMA_CONTIGUOUS
	select HAVE_EXIT_THREAD
	select HAVE_FUNCTION_TRACER
	select HAVE_FUTEX_CMPXCHG if !MMU
	select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX
	select HAVE_HW_BREAKPOINT if PERF_EVENTS
	select HAVE_IRQ_TIME_ACCOUNTING
	select HAVE_OPROFILE
@@ -230,7 +230,7 @@ config BLK_DEV_LOOP_MIN_COUNT
	  dynamically allocated with the /dev/loop-control interface.

config BLK_DEV_CRYPTOLOOP
	tristate "Cryptoloop Support"
	tristate "Cryptoloop Support (DEPRECATED)"
	select CRYPTO
	select CRYPTO_CBC
	depends on BLK_DEV_LOOP
@@ -242,7 +242,7 @@ config BLK_DEV_CRYPTOLOOP
	  WARNING: This device is not safe for journaled file systems like
	  ext3 or Reiserfs. Please use the Device Mapper crypto module
	  instead, which can be configured to be on-disk compatible with the
	  cryptoloop device.
	  cryptoloop device. cryptoloop support will be removed in Linux 5.16.

source "drivers/block/drbd/Kconfig"
@@ -189,6 +189,8 @@ init_cryptoloop(void)

	if (rc)
		printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
	else
		pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n");
	return rc;
}
@@ -585,21 +585,21 @@ static const struct ipu_rgb def_bgra_16 = {
	.bits_per_pixel = 16,
};

#define Y_OFFSET(pix, x, y)	((x) + pix->width * (y))
#define U_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * ((y) / 2) / 2) + (x) / 2)
#define V_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * pix->height / 4) + \
				 (pix->width * ((y) / 2) / 2) + (x) / 2)
#define U2_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * (y) / 2) + (x) / 2)
#define V2_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * pix->height / 2) + \
				 (pix->width * (y) / 2) + (x) / 2)
#define UV_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * ((y) / 2)) + (x))
#define UV2_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * y) + (x))
#define Y_OFFSET(pix, x, y)	((x) + pix->bytesperline * (y))
#define U_OFFSET(pix, x, y)	((pix->bytesperline * pix->height) + \
				 (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
#define V_OFFSET(pix, x, y)	((pix->bytesperline * pix->height) + \
				 (pix->bytesperline * pix->height / 4) + \
				 (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
#define U2_OFFSET(pix, x, y)	((pix->bytesperline * pix->height) + \
				 (pix->bytesperline * (y) / 2) + (x) / 2)
#define V2_OFFSET(pix, x, y)	((pix->bytesperline * pix->height) + \
				 (pix->bytesperline * pix->height / 2) + \
				 (pix->bytesperline * (y) / 2) + (x) / 2)
#define UV_OFFSET(pix, x, y)	((pix->bytesperline * pix->height) + \
				 (pix->bytesperline * ((y) / 2)) + (x))
#define UV2_OFFSET(pix, x, y)	((pix->bytesperline * pix->height) + \
				 (pix->bytesperline * y) + (x))

#define NUM_ALPHA_CHANNELS 7
@@ -1346,7 +1346,7 @@ static int stk_camera_probe(struct usb_interface *interface,
	if (!dev->isoc_ep) {
		pr_err("Could not find isoc-in endpoint\n");
		err = -ENODEV;
		goto error;
		goto error_put;
	}
	dev->vsettings.palette = V4L2_PIX_FMT_RGB565;
	dev->vsettings.mode = MODE_VGA;
@@ -1359,10 +1359,12 @@ static int stk_camera_probe(struct usb_interface *interface,

	err = stk_register_video_device(dev);
	if (err)
		goto error;
		goto error_put;

	return 0;

error_put:
	usb_put_intf(interface);
error:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
@@ -275,6 +275,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,

	if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
		desc_ptp = macb_ptp_desc(bp, desc);
		/* Unlikely but check */
		if (!desc_ptp) {
			dev_warn_ratelimited(&bp->pdev->dev,
					     "Timestamp not supported in BD\n");
			return;
		}
		gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -307,8 +313,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
	if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
		return -ENOMEM;

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	desc_ptp = macb_ptp_desc(queue->bp, desc);
	/* Unlikely but check */
	if (!desc_ptp)
		return -EINVAL;
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	tx_timestamp = &queue->tx_timestamps[head];
	tx_timestamp->skb = skb;
	/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
@@ -559,7 +559,12 @@ static int qed_enable_msix(struct qed_dev *cdev,
		rc = cnt;
	}

	if (rc > 0) {
	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of msix vectors as we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * this VF.
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
@@ -1773,6 +1773,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
	}

	edev->int_info.used_cnt = 0;
	edev->int_info.msix_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
@@ -2317,7 +2318,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
	goto out;
err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
@@ -4713,6 +4713,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
	rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));

	rtl_pcie_state_l2l3_disable(tp);
	rtl_hw_aspm_clkreq_enable(tp, true);
}

static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
@@ -939,10 +939,8 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_stop_queue(ndev);
	}

	return NETDEV_TX_OK;
}
@@ -3246,12 +3246,12 @@ static void fixup_mpss_256(struct pci_dev *dev)
{
	dev->pcie_mpss = 1; /* 256 bytes */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
			PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
			PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
			PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);

/*
 * Intel 5000 and 5100 Memory controllers have an erratum with read completion
@@ -46,7 +46,8 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
			       unsigned long id)
{
	struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
	int val, err;
	int err;
	u32 val;

	err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
	if (err)
@@ -197,12 +197,13 @@ static void xhci_ring_dump_segment(struct seq_file *s,
	int i;
	dma_addr_t dma;
	union xhci_trb *trb;
	char str[XHCI_MSG_MAX];

	for (i = 0; i < TRBS_PER_SEGMENT; i++) {
		trb = &seg->trbs[i];
		dma = seg->dma + i * sizeof(*trb);
		seq_printf(s, "%pad: %s\n", &dma,
			   xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
			   xhci_decode_trb(str, XHCI_MSG_MAX, le32_to_cpu(trb->generic.field[0]),
					   le32_to_cpu(trb->generic.field[1]),
					   le32_to_cpu(trb->generic.field[2]),
					   le32_to_cpu(trb->generic.field[3])));
@@ -340,9 +341,10 @@ static int xhci_portsc_show(struct seq_file *s, void *unused)
{
	struct xhci_port *port = s->private;
	u32 portsc;
	char str[XHCI_MSG_MAX];

	portsc = readl(port->addr);
	seq_printf(s, "%s\n", xhci_decode_portsc(portsc));
	seq_printf(s, "%s\n", xhci_decode_portsc(str, portsc));

	return 0;
}
@@ -134,6 +134,13 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
	const struct soc_device_attribute *attr;
	const char *firmware_name;

	/*
	 * According to the datasheet, "Upon the completion of FW Download,
	 * there is no need to write or reload FW".
	 */
	if (readl(regs + RCAR_USB3_DL_CTRL) & RCAR_USB3_DL_CTRL_FW_SUCCESS)
		return 0;

	attr = soc_device_match(rcar_quirks_match);
	if (attr)
		quirks = (uintptr_t)attr->data;
@@ -25,8 +25,6 @@
#include "xhci.h"
#include "xhci-dbgcap.h"

#define XHCI_MSG_MAX 500

DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
@@ -122,6 +120,7 @@ DECLARE_EVENT_CLASS(xhci_log_trb,
		__field(u32, field1)
		__field(u32, field2)
		__field(u32, field3)
		__dynamic_array(char, str, XHCI_MSG_MAX)
	),
	TP_fast_assign(
		__entry->type = ring->type;
@@ -131,7 +130,7 @@ DECLARE_EVENT_CLASS(xhci_log_trb,
		__entry->field3 = le32_to_cpu(trb->field[3]);
	),
	TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
		  xhci_decode_trb(__entry->field0, __entry->field1,
		  xhci_decode_trb(__get_str(str), XHCI_MSG_MAX, __entry->field0, __entry->field1,
				  __entry->field2, __entry->field3)
	)
);
@@ -523,6 +522,7 @@ DECLARE_EVENT_CLASS(xhci_log_portsc,
	TP_STRUCT__entry(
		__field(u32, portnum)
		__field(u32, portsc)
		__dynamic_array(char, str, XHCI_MSG_MAX)
	),
	TP_fast_assign(
		__entry->portnum = portnum;
@@ -530,7 +530,7 @@ DECLARE_EVENT_CLASS(xhci_log_portsc,
	),
	TP_printk("port-%d: %s",
		  __entry->portnum,
		  xhci_decode_portsc(__entry->portsc)
		  xhci_decode_portsc(__get_str(str), __entry->portsc)
	)
);
@@ -22,6 +22,9 @@
#include "xhci-ext-caps.h"
#include "pci-quirks.h"

/* max buffer size for trace and debug messages */
#define XHCI_MSG_MAX 500

/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET (0x60)

@@ -2217,15 +2220,14 @@ static inline char *xhci_slot_state_string(u32 state)
	}
}

static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
		u32 field3)
static inline const char *xhci_decode_trb(char *str, size_t size,
		u32 field0, u32 field1, u32 field2, u32 field3)
{
	static char str[256];
	int type = TRB_FIELD_TO_TYPE(field3);

	switch (type) {
	case TRB_LINK:
		sprintf(str,
		snprintf(str, size,
			"LINK %08x%08x intr %d type '%s' flags %c:%c:%c:%c",
			field1, field0, GET_INTR_TARGET(field2),
			xhci_trb_type_string(type),
@@ -2242,7 +2244,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
	case TRB_HC_EVENT:
	case TRB_DEV_NOTE:
	case TRB_MFINDEX_WRAP:
		sprintf(str,
		snprintf(str, size,
			"TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c",
			field1, field0,
			xhci_trb_comp_code_string(GET_COMP_CODE(field2)),
@@ -2255,7 +2257,8 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,

		break;
	case TRB_SETUP:
		sprintf(str, "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c",
		snprintf(str, size,
			"bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c",
			field0 & 0xff,
			(field0 & 0xff00) >> 8,
			(field0 & 0xff000000) >> 24,
@@ -2272,7 +2275,8 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_DATA:
		sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c",
		snprintf(str, size,
			"Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c",
			field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
			GET_INTR_TARGET(field2),
			xhci_trb_type_string(type),
@@ -2285,7 +2289,8 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_STATUS:
		sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c",
		snprintf(str, size,
			"Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c",
			field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
			GET_INTR_TARGET(field2),
			xhci_trb_type_string(type),
@@ -2298,7 +2303,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
	case TRB_ISOC:
	case TRB_EVENT_DATA:
	case TRB_TR_NOOP:
		sprintf(str,
		snprintf(str, size,
			"Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c",
			field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
			GET_INTR_TARGET(field2),
@@ -2315,21 +2320,21 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,

	case TRB_CMD_NOOP:
	case TRB_ENABLE_SLOT:
		sprintf(str,
		snprintf(str, size,
			"%s: flags %c",
			xhci_trb_type_string(type),
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_DISABLE_SLOT:
	case TRB_NEG_BANDWIDTH:
		sprintf(str,
		snprintf(str, size,
			"%s: slot %d flags %c",
			xhci_trb_type_string(type),
			TRB_TO_SLOT_ID(field3),
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_ADDR_DEV:
		sprintf(str,
		snprintf(str, size,
			"%s: ctx %08x%08x slot %d flags %c:%c",
			xhci_trb_type_string(type),
			field1, field0,
@@ -2338,7 +2343,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_CONFIG_EP:
		sprintf(str,
		snprintf(str, size,
			"%s: ctx %08x%08x slot %d flags %c:%c",
			xhci_trb_type_string(type),
			field1, field0,
@@ -2347,7 +2352,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_EVAL_CONTEXT:
		sprintf(str,
		snprintf(str, size,
			"%s: ctx %08x%08x slot %d flags %c",
			xhci_trb_type_string(type),
			field1, field0,
@@ -2355,7 +2360,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_RESET_EP:
		sprintf(str,
		snprintf(str, size,
			"%s: ctx %08x%08x slot %d ep %d flags %c:%c",
			xhci_trb_type_string(type),
			field1, field0,
@@ -2376,7 +2381,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_SET_DEQ:
		sprintf(str,
		snprintf(str, size,
			"%s: deq %08x%08x stream %d slot %d ep %d flags %c",
			xhci_trb_type_string(type),
			field1, field0,
@@ -2387,14 +2392,14 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_RESET_DEV:
		sprintf(str,
		snprintf(str, size,
			"%s: slot %d flags %c",
			xhci_trb_type_string(type),
			TRB_TO_SLOT_ID(field3),
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_FORCE_EVENT:
		sprintf(str,
		snprintf(str, size,
			"%s: event %08x%08x vf intr %d vf id %d flags %c",
			xhci_trb_type_string(type),
			field1, field0,
@@ -2403,14 +2408,14 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_SET_LT:
		sprintf(str,
		snprintf(str, size,
			"%s: belt %d flags %c",
			xhci_trb_type_string(type),
			TRB_TO_BELT(field3),
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_GET_BW:
		sprintf(str,
		snprintf(str, size,
			"%s: ctx %08x%08x slot %d speed %d flags %c",
			xhci_trb_type_string(type),
			field1, field0,
@@ -2419,7 +2424,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	case TRB_FORCE_HEADER:
		sprintf(str,
		snprintf(str, size,
			"%s: info %08x%08x%08x pkt type %d roothub port %d flags %c",
			xhci_trb_type_string(type),
			field2, field1, field0 & 0xffffffe0,
@@ -2428,7 +2433,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
			field3 & TRB_CYCLE ? 'C' : 'c');
		break;
	default:
		sprintf(str,
		snprintf(str, size,
			"type '%s' -> raw %08x %08x %08x %08x",
			xhci_trb_type_string(type),
			field0, field1, field2, field3);
@@ -2553,9 +2558,8 @@ static inline const char *xhci_portsc_link_state_string(u32 portsc)
		return "Unknown";
	}
}

static inline const char *xhci_decode_portsc(u32 portsc)
static inline const char *xhci_decode_portsc(char *str, u32 portsc)
{
	static char str[256];
	int ret;

	ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ",
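For orientation only, a minimal sketch of the calling convention the "xhci: fix unsafe memory usage in xhci tracing" change switches to: the decode helpers above now format into a caller-owned buffer instead of a shared static str[256]. The function name example_portsc_show is an assumption for illustration, not part of this commit; the pattern simply mirrors the xhci-debugfs.c and xhci-trace.h call sites shown earlier.

/* Hypothetical caller sketch (not from this commit). */
static int example_portsc_show(struct seq_file *s, void *unused)
{
	struct xhci_port *port = s->private;
	char str[XHCI_MSG_MAX];	/* caller-owned buffer replaces the old static str[256] */
	u32 portsc = readl(port->addr);

	/* xhci_decode_portsc() now writes into the buffer the caller passes in */
	seq_printf(s, "%s\n", xhci_decode_portsc(str, portsc));
	return 0;
}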
@@ -72,14 +72,12 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
	u32 interval = 0;
	u32 mult = 0;
	u32 burst = 0;
	int max_packet;
	int ret;

	desc = mep->desc;
	comp_desc = mep->comp_desc;
	mep->type = usb_endpoint_type(desc);
	max_packet = usb_endpoint_maxp(desc);
	mep->maxp = max_packet & GENMASK(10, 0);
	mep->maxp = usb_endpoint_maxp(desc);

	switch (mtu->g.speed) {
	case USB_SPEED_SUPER:
@@ -100,7 +98,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
		    usb_endpoint_xfer_int(desc)) {
			interval = desc->bInterval;
			interval = clamp_val(interval, 1, 16) - 1;
			burst = (max_packet & GENMASK(12, 11)) >> 11;
			mult = usb_endpoint_maxp_mult(desc) - 1;
		}
		break;
	default:
@@ -226,8 +226,10 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
	int status;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
	if (!buf) {
		*data = 0;
		return -ENOMEM;
	}

	status = usb_control_msg(usbdev, pipe, request, requesttype, value,
				 index, buf, 1, MOS_WDR_TIMEOUT);
@@ -543,7 +543,7 @@ again:
	 * inode has not been flagged as nocompress. This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (nr_pages > 1 && inode_need_compress(inode, start, end)) {
	if (inode_need_compress(inode, start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
@@ -349,3 +349,47 @@ err_kfree:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fscrypt_get_symlink);

/**
 * fscrypt_symlink_getattr() - set the correct st_size for encrypted symlinks
 * @path: the path for the encrypted symlink being queried
 * @stat: the struct being filled with the symlink's attributes
 *
 * Override st_size of encrypted symlinks to be the length of the decrypted
 * symlink target (or the no-key encoded symlink target, if the key is
 * unavailable) rather than the length of the encrypted symlink target. This is
 * necessary for st_size to match the symlink target that userspace actually
 * sees. POSIX requires this, and some userspace programs depend on it.
 *
 * This requires reading the symlink target from disk if needed, setting up the
 * inode's encryption key if possible, and then decrypting or encoding the
 * symlink target. This makes lstat() more heavyweight than is normally the
 * case. However, decrypted symlink targets will be cached in ->i_link, so
 * usually the symlink won't have to be read and decrypted again later if/when
 * it is actually followed, readlink() is called, or lstat() is called again.
 *
 * Return: 0 on success, -errno on failure
 */
int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = d_inode(dentry);
	const char *link;
	DEFINE_DELAYED_CALL(done);

	/*
	 * To get the symlink target that userspace will see (whether it's the
	 * decrypted target or the no-key encoded target), we can just get it in
	 * the same way the VFS does during path resolution and readlink().
	 */
	link = READ_ONCE(inode->i_link);
	if (!link) {
		link = inode->i_op->get_link(dentry, inode, &done);
		if (IS_ERR(link))
			return PTR_ERR(link);
	}
	stat->size = strlen(link);
	do_delayed_call(&done);
	return 0;
}
EXPORT_SYMBOL_GPL(fscrypt_symlink_getattr);
@@ -764,6 +764,12 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
	ext4_write_lock_xattr(inode, &no_expand);
	BUG_ON(!ext4_has_inline_data(inode));

	/*
	 * ei->i_inline_off may have changed since ext4_write_begin()
	 * called ext4_try_to_write_inline_data()
	 */
	(void) ext4_find_inline_data_nolock(inode);

	kaddr = kmap_atomic(page);
	ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
	kunmap_atomic(kaddr);
@@ -52,10 +52,19 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
	return paddr;
}

static int ext4_encrypted_symlink_getattr(const struct path *path,
					   struct kstat *stat, u32 request_mask,
					   unsigned int query_flags)
{
	ext4_getattr(path, stat, request_mask, query_flags);

	return fscrypt_symlink_getattr(path, stat);
}

const struct inode_operations ext4_encrypted_symlink_inode_operations = {
	.get_link	= ext4_encrypted_get_link,
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.getattr	= ext4_encrypted_symlink_getattr,
	.listxattr	= ext4_listxattr,
};
@@ -1293,9 +1293,18 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
	return target;
}

static int f2fs_encrypted_symlink_getattr(const struct path *path,
					   struct kstat *stat, u32 request_mask,
					   unsigned int query_flags)
{
	f2fs_getattr(path, stat, request_mask, query_flags);

	return fscrypt_symlink_getattr(path, stat);
}

const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
	.get_link	= f2fs_encrypted_get_link,
	.getattr	= f2fs_getattr,
	.getattr	= f2fs_encrypted_symlink_getattr,
	.setattr	= f2fs_setattr,
	.listxattr	= f2fs_listxattr,
};
@@ -1629,6 +1629,16 @@ static const char *ubifs_get_link(struct dentry *dentry,
	return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}

static int ubifs_symlink_getattr(const struct path *path, struct kstat *stat,
				 u32 request_mask, unsigned int query_flags)
{
	ubifs_getattr(path, stat, request_mask, query_flags);

	if (IS_ENCRYPTED(d_inode(path->dentry)))
		return fscrypt_symlink_getattr(path, stat);
	return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
	.readpage	= ubifs_readpage,
	.writepage	= ubifs_writepage,
@@ -1654,7 +1664,7 @@ const struct inode_operations ubifs_file_inode_operations = {
const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link	= ubifs_get_link,
	.setattr	= ubifs_setattr,
	.getattr	= ubifs_getattr,
	.getattr	= ubifs_symlink_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
	.listxattr	= ubifs_listxattr,
#endif
@@ -262,6 +262,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
				unsigned int max_size,
				struct delayed_call *done);
int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
static inline void fscrypt_set_ops(struct super_block *sb,
				   const struct fscrypt_operations *s_cop)
{
@@ -573,6 +574,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int fscrypt_symlink_getattr(const struct path *path,
					   struct kstat *stat)
{
	return -EOPNOTSUPP;
}

static inline void fscrypt_set_ops(struct super_block *sb,
				   const struct fscrypt_operations *s_cop)
{
@@ -77,6 +77,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
	return (__force void *)k->set_child_tid;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->set_child_tid
 *
 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = (__force void *)p->set_child_tid;
	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;
@@ -177,10 +196,11 @@ void *kthread_data(struct task_struct *task)
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	if (kthread)
		probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

@@ -491,9 +511,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *k)
bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = to_kthread(k);
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

@@ -1296,11 +1316,9 @@ EXPORT_SYMBOL(kthread_destroy_worker);
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;
	struct kthread *kthread = __to_kthread(current);

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

@@ -1322,13 +1340,10 @@ EXPORT_SYMBOL(kthread_associate_blkcg);
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;
	struct kthread *kthread = __to_kthread(current);

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	if (kthread)
		return kthread->blkcg_css;
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
@@ -7445,7 +7445,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
		return 0;

	/* Disregard pcpu kthreads; they are where they need to be. */
	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
	if (kthread_is_per_cpu(p))
		return 0;

	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
@@ -918,7 +918,7 @@ static inline void __free_one_page(struct page *page,
	unsigned int max_order;
	struct capture_control *capc = task_capc(zone);

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@@ -931,7 +931,7 @@ static inline void __free_one_page(struct page *page,
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
	while (order < max_order) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
						  migratetype);
@@ -957,7 +957,7 @@ continue_merging:
		pfn = combined_pfn;
		order++;
	}
	if (max_order < MAX_ORDER) {
	if (order < MAX_ORDER - 1) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
@@ -978,7 +978,7 @@ continue_merging:
			    is_migrate_isolate(buddy_mt)))
			goto done_merging;
		}
		max_order++;
		max_order = order + 1;
		goto continue_merging;
	}
@@ -460,6 +460,23 @@ out_bh_enable:
	local_bh_enable();
}

/*
 * The device used for looking up which routing table to use for sending an ICMP
 * error is preferably the source whenever it is set, which should ensure the
 * icmp error can be sent to the source host, else lookup using the routing
 * table of the destination device, else use the main routing table (index 0).
 */
static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
{
	struct net_device *route_lookup_dev = NULL;

	if (skb->dev)
		route_lookup_dev = skb->dev;
	else if (skb_dst(skb))
		route_lookup_dev = skb_dst(skb)->dev;
	return route_lookup_dev;
}

static struct rtable *icmp_route_lookup(struct net *net,
					struct flowi4 *fl4,
					struct sk_buff *skb_in,
@@ -468,6 +485,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
					int type, int code,
					struct icmp_bxm *param)
{
	struct net_device *route_lookup_dev;
	struct rtable *rt, *rt2;
	struct flowi4 fl4_dec;
	int err;
@@ -482,7 +500,8 @@ static struct rtable *icmp_route_lookup(struct net *net,
	fl4->flowi4_proto = IPPROTO_ICMP;
	fl4->fl4_icmp_type = type;
	fl4->fl4_icmp_code = code;
	fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
	route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
	fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);

	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
	rt = ip_route_output_key_hash(net, fl4, skb_in);
@@ -506,7 +525,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
	if (err)
		goto relookup_failed;

	if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
	if (inet_addr_type_dev_table(net, route_lookup_dev,
				     fl4_dec.saddr) == RTN_LOCAL) {
		rt2 = __ip_route_output_key(net, &fl4_dec);
		if (IS_ERR(rt2))
@@ -2723,6 +2723,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
		rv = 1;
	} else if (im) {
		if (src_addr) {
			spin_lock_bh(&im->lock);
			for (psf = im->sources; psf; psf = psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
@@ -2733,6 +2734,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
					im->sfcount[MCAST_EXCLUDE];
			else
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
			spin_unlock_bh(&im->lock);
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
@@ -1736,7 +1736,7 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
		channels = params_channels(params);
		frame_size = snd_pcm_format_size(format, channels);
		if (frame_size > 0)
			params->fifo_size /= (unsigned)frame_size;
			params->fifo_size /= frame_size;
	}
	return 0;
}
@@ -9160,6 +9160,16 @@ static int patch_alc269(struct hda_codec *codec)

	snd_hda_pick_fixup(codec, alc269_fixup_models,
		       alc269_fixup_tbl, alc269_fixups);
	/* FIXME: both TX300 and ROG Strix G17 have the same SSID, and
	 * the quirk breaks the latter (bko#214101).
	 * Clear the wrong entry.
	 */
	if (codec->fixup_id == ALC282_FIXUP_ASUS_TX300 &&
	    codec->core.vendor_id == 0x10ec0294) {
		codec_dbg(codec, "Clear wrong fixup for ASUS ROG Strix G17\n");
		codec->fixup_id = HDA_FIXUP_ID_NOT_SET;
	}

	snd_hda_pick_pin_fixup(codec, alc269_pin_fixup_tbl, alc269_fixups, true);
	snd_hda_pick_pin_fixup(codec, alc269_fallback_pin_fixup_tbl, alc269_fixups, false);
	snd_hda_pick_fixup(codec, NULL, alc269_fixup_vendor_tbl,
@@ -1838,6 +1838,7 @@ static const struct registration_quirk registration_quirks[] = {
	REG_QUIRK_ENTRY(0x0951, 0x16ed, 2),	/* Kingston HyperX Cloud Alpha S */
	REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),	/* Kingston HyperX Cloud Flight S */
	REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2),	/* JBL Quantum 600 */
	REG_QUIRK_ENTRY(0x0ecb, 0x1f47, 2),	/* JBL Quantum 800 */
	REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2),	/* JBL Quantum 400 */
	REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2),	/* JBL Quantum 600 */
	REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2),	/* JBL Quantum 800 */