This is the 5.4.76 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl+qe5YACgkQONu9yGCS
 aT6bAw//VGKqKOUOva6147u3U98FFBuYMJnZwZIxqvX4PFJnSwqKmsLUoCI8bhJV
 UJ+lbbBvyNbe2DS1+YkhlHTC15U7dHIWtSM4/FC7rvgTuvjAj4epqDDu5IkOoK4W
 Pil+zV1fwnwHrcuBbb5Ydk+mS3I/sVjObAQygluQPt1D2xESkyITq/uT9Lal0hRy
 fbyfUNYrhf4Bdeyfgzr7sEDrorgzQJ+7NBDR5NTzn0j0gph4hhe1z5FWmy8jEPXM
 kKy39nTrCu5hQhEL7L0G29ZLb0s8mhMM9B7OyKHCALtdc6VqwC3WFZqkwrr/cInQ
 bDuuBMngRe+n/A5xVMmsnjFyR+znXg82HYQuqrBJ1w3S4pbV+j0dcVJ9PiusyYdR
 n81HCakatyIq9Oe64yHKIlbxslkfgUjJX+uR4LfNS7iC4ad5fV/BwdCs0z0v2oOH
 o38e5V/qQFiI442+BR6fPagYEpHxJAlteZTpdUteYUBTpQ97v76K/10fqLdGc07s
 vevP4T2t3Z1qtswY5VbU2jOkNilgnOlqIw+VSzSXp4N8jcF+TEgtSB/X18eX69oy
 wQ8+aJzNjWCOFfqbYpS+1X2X/eVzBdBrQ8rk/FMKJ0Edxwm3YpoAqHb6copODzaZ
 cBwCyhbJbHeYpbzgJkkAJEZKffy6XWmwVqtYoi52HZNB1A5ipIA=
 =Cjfz
 -----END PGP SIGNATURE-----

Merge 5.4.76 into android11-5.4-lts

Changes in 5.4.76
	drm/i915: Break up error capture compression loops with cond_resched()
	drm/i915/gt: Delay execlist processing for tgl
	drm/i915: Drop runtime-pm assert from vgpu io accessors
	ASoC: Intel: Skylake: Add alternative topology binary name
	linkage: Introduce new macros for assembler symbols
	arm64: asm: Add new-style position independent function annotations
	arm64: lib: Use modern annotations for assembly functions
	arm64: Change .weak to SYM_FUNC_START_WEAK_PI for arch/arm64/lib/mem*.S
	tipc: fix use-after-free in tipc_bcast_get_mode
	ptrace: fix task_join_group_stop() for the case when current is traced
	cadence: force nonlinear buffers to be cloned
	chelsio/chtls: fix memory leaks caused by a race
	chelsio/chtls: fix always leaking ctrl_skb
	gianfar: Replace skb_realloc_headroom with skb_cow_head for PTP
	gianfar: Account for Tx PTP timestamp in the skb headroom
	ionic: check port ptr before use
	ip_tunnel: fix over-mtu packet send fail without TUNNEL_DONT_FRAGMENT flags
	net: usb: qmi_wwan: add Telit LE910Cx 0x1230 composition
	powerpc/vnic: Extend "failover pending" window
	sctp: Fix COMM_LOST/CANT_STR_ASSOC err reporting on big-endian platforms
	sfp: Fix error handing in sfp_probe()
	Fonts: Replace discarded const qualifier
	ALSA: hda/realtek - Fixed HP headset Mic can't be detected
	ALSA: hda/realtek - Enable headphone for ASUS TM420
	ALSA: usb-audio: Add implicit feedback quirk for Zoom UAC-2
	ALSA: usb-audio: add usb vendor id as DSD-capable for Khadas devices
	ALSA: usb-audio: Add implicit feedback quirk for Qu-16
	ALSA: usb-audio: Add implicit feedback quirk for MODX
	mm: mempolicy: fix potential pte_unmap_unlock pte error
	lib/crc32test: remove extra local_irq_disable/enable
	kthread_worker: prevent queuing delayed work from timer_fn when it is being canceled
	mm: always have io_remap_pfn_range() set pgprot_decrypted()
	gfs2: Wake up when sd_glock_disposal becomes zero
	ring-buffer: Fix recursion protection transitions between interrupt context
	mtd: spi-nor: Don't copy self-pointing struct around
	ftrace: Fix recursion check for NMI test
	ftrace: Handle tracing when switching between context
	regulator: defer probe when trying to get voltage from unresolved supply
	spi: bcm2835: fix gpio cs level inversion
	tracing: Fix out of bounds write in get_trace_buf
	futex: Handle transient "ownerless" rtmutex state correctly
	ARM: dts: sun4i-a10: fix cpu_alert temperature
	arm64: dts: meson: add missing g12 rng clock
	x86/kexec: Use up-to-dated screen_info copy to fill boot params
	of: Fix reserved-memory overlap detection
	drm/sun4i: frontend: Rework a bit the phase data
	drm/sun4i: frontend: Reuse the ch0 phase for RGB formats
	drm/sun4i: frontend: Fix the scaler phase on A33
	blk-cgroup: Fix memleak on error path
	blk-cgroup: Pre-allocate tree node on blkg_conf_prep
	scsi: core: Don't start concurrent async scan on same host
	drm/amdgpu: add DID for navi10 blockchain SKU
	scsi: ibmvscsi: Fix potential race after loss of transport
	vsock: use ns_capable_noaudit() on socket create
	nvme-rdma: handle unexpected nvme completion data length
	nvmet: fix a NULL pointer dereference when tracing the flush command
	drm/vc4: drv: Add error handding for bind
	ACPI: NFIT: Fix comparison to '-ENXIO'
	usb: cdns3: gadget: suspicious implicit sign extension
	drm/nouveau/nouveau: fix the start/end range for migration
	drm/nouveau/gem: fix "refcount_t: underflow; use-after-free"
	arm64/smp: Move rcu_cpu_starting() earlier
	Revert "coresight: Make sysfs functional on topologies with per core sink"
	vt: Disable KD_FONT_OP_COPY
	fork: fix copy_process(CLONE_PARENT) race with the exiting ->real_parent
	s390/pkey: fix paes selftest failure with paes and pkey static build
	serial: 8250_mtk: Fix uart_get_baud_rate warning
	serial: txx9: add missing platform_driver_unregister() on error in serial_txx9_init
	USB: serial: cyberjack: fix write-URB completion race
	USB: serial: option: add Quectel EC200T module support
	USB: serial: option: add LE910Cx compositions 0x1203, 0x1230, 0x1231
	USB: serial: option: add Telit FN980 composition 0x1055
	tty: serial: fsl_lpuart: add LS1028A support
	tty: serial: fsl_lpuart: LS1021A has a FIFO size of 16 words, like LS1028A
	usb: dwc3: ep0: Fix delay status handling
	USB: Add NO_LPM quirk for Kingston flash drive
	usb: mtu3: fix panic in mtu3_gadget_stop()
	drm/panfrost: Fix a deadlock between the shrinker and madvise path
	ARC: stack unwinding: avoid indefinite looping
	PM: runtime: Drop runtime PM references to supplier on link removal
	PM: runtime: Drop pm_runtime_clean_up_links()
	PM: runtime: Resume the device earlier in __device_release_driver()
	xfs: flush for older, xfs specific ioctls
	perf/core: Fix a memory leak in perf_event_parse_addr_filter()
	arm64: dts: marvell: espressobin: Add ethernet switch aliases
	Linux 5.4.76

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I9e122c9984f0a80613577d71e59101a97e92c1a4
commit 0e161a13b1
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2020-11-10 15:10:23 +01:00

115 files changed, 1131 insertions(+), 397 deletions(-)

@ -0,0 +1,216 @@
Assembler Annotations
=====================

Copyright (c) 2017-2019 Jiri Slaby

This document describes the new macros for annotation of data and code in
assembly. In particular, it contains information about ``SYM_FUNC_START``,
``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.

Rationale
---------

Some code like entries, trampolines, or boot code needs to be written in
assembly. The same as in C, such code is grouped into functions and
accompanied with data. Standard assemblers do not force users into precisely
marking these pieces as code or data, or even into specifying their length.
Nevertheless, assemblers provide developers with such annotations to aid
debuggers throughout assembly. On top of that, developers also want to mark
some functions as *global* in order to be visible outside of their translation
units.

Over time, the Linux kernel has adopted macros from various projects (like
``binutils``) to facilitate such annotations. So for historic reasons,
developers have been using ``ENTRY``, ``END``, ``ENDPROC``, and other
annotations in assembly. Because these macros were long undocumented, they
have often been used in the wrong contexts. Clearly, ``ENTRY`` was intended
to denote the beginning of global symbols (be it data or code). ``END`` used
to mark the end of data or the end of special functions with a *non-standard*
calling convention. In contrast, ``ENDPROC`` should annotate only the ends of
*standard* functions.
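
For illustration, a short sketch of how these deprecated macros were meant to
be combined (the function names here are hypothetical, not taken from any
real file)::

    ENTRY(my_func)          /* global symbol, standard C calling convention */
        ... asm insns ...
        ret
    ENDPROC(my_func)        /* end marker; sets STT_FUNC type and the size */

    ENTRY(my_trampoline)    /* global symbol, but *not* a standard function */
        ... asm insns ...
    END(my_trampoline)      /* end marker; sets the size only, no type */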
When these macros are used correctly, they help assemblers generate a nice
object with both sizes and types set correctly. For example, the result of
``arch/x86/lib/putuser.S``::

   Num:    Value          Size Type    Bind   Vis      Ndx Name
    25: 0000000000000000    33 FUNC    GLOBAL DEFAULT    1 __put_user_1
    29: 0000000000000030    37 FUNC    GLOBAL DEFAULT    1 __put_user_2
    32: 0000000000000060    36 FUNC    GLOBAL DEFAULT    1 __put_user_4
    35: 0000000000000090    37 FUNC    GLOBAL DEFAULT    1 __put_user_8

This is not only important for debugging purposes. When there are properly
annotated objects like this, tools can be run on them to generate more useful
information. In particular, on properly annotated objects, ``objtool`` can be
run to check and fix the object if needed. Currently, ``objtool`` can report
missing frame pointer setup/destruction in functions. It can also
automatically generate annotations for the :doc:`ORC unwinder
<x86/orc-unwinder>` for most code. Both of these are especially important for
supporting reliable stack traces, which are in turn necessary for
:doc:`Kernel live patching <livepatch/livepatch>`.
Caveat and Discussion
---------------------

As one might realize, there were only three macros previously. That is indeed
insufficient to cover all the combinations of cases:

* standard/non-standard function
* code/data
* global/local symbol

There was a discussion_, and instead of extending the current ``ENTRY/END*``
macros, it was decided that brand new macros should be introduced::

    So how about using macro names that actually show the purpose, instead
    of importing all the crappy, historic, essentially randomly chosen
    debug symbol macro names from the binutils and older kernels?

.. _discussion: https://lkml.kernel.org/r/20170217104757.28588-1-jslaby@suse.cz

Macros Description
------------------

The new macros are prefixed with ``SYM_`` and can be divided into three main
groups:

1. ``SYM_FUNC_*`` -- to annotate C-like functions. This means functions with
   standard C calling conventions, i.e. the stack contains a return address
   at the predefined place and a return from the function can happen in a
   standard way. When frame pointers are enabled, save/restore of the frame
   pointer shall also happen at the start/end of the function, respectively.

   Checking tools like ``objtool`` should ensure such marked functions
   conform to these rules. The tools can also easily annotate these functions
   with debugging information (like *ORC data*) automatically.

2. ``SYM_CODE_*`` -- special functions called with a special stack, such as
   interrupt handlers with special stack content, trampolines, or startup
   functions.

   Checking tools mostly ignore these functions, but some debug information
   still can be generated automatically. For correct debug data, this code
   needs hints like ``UNWIND_HINT_REGS`` provided by developers.

3. ``SYM_DATA*`` -- obviously data belonging to ``.data`` sections and not to
   ``.text``. Data do not contain instructions, so the tools have to treat
   them specially: they should not treat the bytes as instructions, nor
   assign any debug information to them.
Instruction Macros
~~~~~~~~~~~~~~~~~~

This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.

* ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the
  most frequent markings**. They are used for functions with standard calling
  conventions -- global and local. Like in C, they both align the functions
  to architecture-specific ``__ALIGN`` bytes. There are also ``_NOALIGN``
  variants for special cases where developers do not want this implicit
  alignment.

  ``SYM_FUNC_START_WEAK`` and ``SYM_FUNC_START_WEAK_NOALIGN`` markings are
  also offered as an assembler counterpart to the *weak* attribute known from
  C.

  All of these **shall** be coupled with ``SYM_FUNC_END``. First, it marks
  the sequence of instructions as a function and stores its computed size in
  the generated object file. Second, it also eases checking and processing of
  such object files, as the tools can trivially find exact function
  boundaries.

  So in most cases, developers should write something like the following
  example, having some asm instructions in between the macros, of course::

    SYM_FUNC_START(function_hook)
        ... asm insns ...
    SYM_FUNC_END(function_hook)

  In fact, this kind of annotation corresponds to the now deprecated
  ``ENTRY`` and ``ENDPROC`` macros; a migration sketch follows this list.

* ``SYM_FUNC_START_ALIAS`` and ``SYM_FUNC_START_LOCAL_ALIAS`` serve for those
  who decided to have two or more names for one function. The typical use
  is::

    SYM_FUNC_START_ALIAS(__memset)
    SYM_FUNC_START(memset)
        ... asm insns ...
    SYM_FUNC_END(memset)
    SYM_FUNC_END_ALIAS(__memset)

  In this example, one can call ``__memset`` or ``memset`` with the same
  result, except the debug information for the instructions is generated in
  the object file only once -- for the non-``ALIAS`` case.

* ``SYM_CODE_START`` and ``SYM_CODE_START_LOCAL`` should be used only in
  special cases -- if you know what you are doing. They are used exclusively
  for interrupt handlers and similar, where the calling convention is not the
  C one. ``_NOALIGN`` variants exist too. The use is the same as for the
  ``FUNC`` category above::

    SYM_CODE_START_LOCAL(bad_put_user)
        ... asm insns ...
    SYM_CODE_END(bad_put_user)

  Again, every ``SYM_CODE_START*`` **shall** be coupled with
  ``SYM_CODE_END``.

  To some extent, this category corresponds to the deprecated ``ENTRY`` and
  ``END``, except that ``END`` had several other meanings too.

* ``SYM_INNER_LABEL*`` is used to denote a label inside some
  ``SYM_{CODE,FUNC}_START`` and ``SYM_{CODE,FUNC}_END`` pair. Such labels are
  very similar to C labels, except they can be made global. An example of
  use::

    SYM_CODE_START(ftrace_caller)
        /* save_mcount_regs fills in first two parameters */
        ...
    SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
        /* Load the ftrace_ops into the 3rd parameter */
        ...
    SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
        call ftrace_stub
        ...
        retq
    SYM_CODE_END(ftrace_caller)
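
Putting the correspondences from the bullet points above side by side, a
migration sketch (with hypothetical symbol names)::

    /* Standard C-like function: */
    ENTRY(do_thing)             ->  SYM_FUNC_START(do_thing)
    ENDPROC(do_thing)           ->  SYM_FUNC_END(do_thing)

    /* Non-standard code, e.g. an interrupt entry: */
    ENTRY(handle_thing)         ->  SYM_CODE_START(handle_thing)
    END(handle_thing)           ->  SYM_CODE_END(handle_thing)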
Data Macros
~~~~~~~~~~~

Similar to instructions, there are a couple of macros to describe data in
assembly.

* ``SYM_DATA_START`` and ``SYM_DATA_START_LOCAL`` mark the start of some data
  and shall be used in conjunction with either ``SYM_DATA_END`` or
  ``SYM_DATA_END_LABEL``. The latter also adds a label to the end, so that
  people can use both ``lstack`` and (the local) ``lstack_end`` in the
  following example::

    SYM_DATA_START_LOCAL(lstack)
        .skip 4096
    SYM_DATA_END_LABEL(lstack, SYM_L_LOCAL, lstack_end)

* ``SYM_DATA`` and ``SYM_DATA_LOCAL`` are variants for simple, mostly
  one-line data::

    SYM_DATA(HEAP, .long rm_heap)
    SYM_DATA(heap_end, .long rm_stack)

  In the end, they expand to ``SYM_DATA_START`` with ``SYM_DATA_END``
  internally.
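
For example, given the definitions in ``include/linux/linkage.h``, the
``SYM_DATA(HEAP, .long rm_heap)`` line above expands to roughly::

    SYM_DATA_START(HEAP)
    .long rm_heap
    SYM_DATA_END(HEAP)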
Support Macros
~~~~~~~~~~~~~~

All of the above ultimately reduce to some invocation of ``SYM_START``,
``SYM_END``, or ``SYM_ENTRY``. Normally, developers should avoid using these
directly.

Further, in the above examples, one could see ``SYM_L_LOCAL``. There are also
``SYM_L_GLOBAL`` and ``SYM_L_WEAK``. All of them denote the linkage of the
symbol they mark. They are used either in the ``_LABEL`` variants of the
earlier macros, or in ``SYM_START``.
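
As a rough sketch of that reduction -- simplified from the generic
``include/linux/linkage.h``, with the ``ASM_NL`` statement separators omitted
for readability::

    #define SYM_ENTRY(name, linkage, align...)      \
            linkage(name)                           \
            align                                   \
            name:

    #define SYM_START(name, linkage, align...)      \
            SYM_ENTRY(name, linkage, align)

    #define SYM_END(name, sym_type)                 \
            .type name sym_type                     \
            .size name, .-name

    #define SYM_FUNC_START(name)                    \
            SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)

    #define SYM_FUNC_END(name)                      \
            SYM_END(name, SYM_T_FUNC)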
Overriding Macros
~~~~~~~~~~~~~~~~~

An architecture can also override any of the macros in its own
``asm/linkage.h``, including the macros specifying the type of a symbol
(``SYM_T_FUNC``, ``SYM_T_OBJECT``, and ``SYM_T_NONE``). As every macro
described in this document is surrounded by ``#ifdef`` + ``#endif``, it is
enough to define the macros differently in the aforementioned
architecture-dependent header.
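
This merge itself contains an instance of the pattern: arm64 overrides the
alignment macros in its ``asm/linkage.h`` and layers position-independent
variants on top of the generic ones (see the corresponding hunk further
below)::

    #define __ALIGN         .align 2
    #define __ALIGN_STR     ".align 2"

    #define SYM_FUNC_START_PI(x)            \
            SYM_FUNC_START_ALIAS(__pi_##x); \
            SYM_FUNC_START(x)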


@ -135,6 +135,14 @@ needed).
   mic/index
   scheduler/index

Architecture-agnostic documentation
-----------------------------------

.. toctree::
   :maxdepth: 2

   asm-annotations

Architecture-specific documentation
-----------------------------------


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 75
SUBLEVEL = 76
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -112,7 +112,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int ret = 0;
int ret = 0, cnt = 0;
unsigned int address;
struct unwind_frame_info frame_info;
@ -132,6 +132,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
break;
frame_info.regs.r63 = frame_info.regs.r31;
if (cnt++ > 128) {
printk("unwinder looping too long, aborting !\n");
return 0;
}
}
return address; /* return the last address it saw */


@ -143,7 +143,7 @@
trips {
cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <850000>;
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};


@ -167,6 +167,8 @@
hwrng: rng@218 {
compatible = "amlogic,meson-rng";
reg = <0x0 0x218 0x0 0x4>;
clocks = <&clkc CLKID_RNG0>;
clock-names = "core";
};
};


@ -21,6 +21,10 @@
aliases {
ethernet0 = &eth0;
/* for dsa slave device */
ethernet1 = &switch0port1;
ethernet2 = &switch0port2;
ethernet3 = &switch0port3;
serial0 = &uart0;
serial1 = &uart1;
};
@ -147,7 +151,7 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
switch0port0: port@0 {
reg = <0>;
label = "cpu";
ethernet = <&eth0>;
@ -158,19 +162,19 @@
};
};
port@1 {
switch0port1: port@1 {
reg = <1>;
label = "wan";
phy-handle = <&switch0phy0>;
};
port@2 {
switch0port2: port@2 {
reg = <2>;
label = "lan0";
phy-handle = <&switch0phy1>;
};
port@3 {
switch0port3: port@3 {
reg = <3>;
label = "lan1";
phy-handle = <&switch0phy2>;


@ -462,6 +462,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
* Deprecated! Use SYM_FUNC_{START,START_WEAK,END}_PI instead.
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
*/


@ -4,4 +4,20 @@
#define __ALIGN .align 2
#define __ALIGN_STR ".align 2"
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
*/
#define SYM_FUNC_START_PI(x) \
SYM_FUNC_START_ALIAS(__pi_##x); \
SYM_FUNC_START(x)
#define SYM_FUNC_START_WEAK_PI(x) \
SYM_FUNC_START_ALIAS(__pi_##x); \
SYM_FUNC_START_WEAK(x)
#define SYM_FUNC_END_PI(x) \
SYM_FUNC_END(x); \
SYM_FUNC_END_ALIAS(__pi_##x)
#endif


@ -218,6 +218,7 @@ asmlinkage notrace void secondary_start_kernel(void)
if (system_uses_irq_prio_masking())
init_gic_priority_masking();
rcu_cpu_starting(cpu);
preempt_disable();
trace_hardirqs_off();


@ -14,7 +14,7 @@
* Parameters:
* x0 - dest
*/
ENTRY(clear_page)
SYM_FUNC_START(clear_page)
mrs x1, dczid_el0
and w1, w1, #0xf
mov x2, #4
@ -25,5 +25,5 @@ ENTRY(clear_page)
tst x0, #(PAGE_SIZE - 1)
b.ne 1b
ret
ENDPROC(clear_page)
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)


@ -19,7 +19,7 @@
*
* Alignment fixed up by hardware.
*/
ENTRY(__arch_clear_user)
SYM_FUNC_START(__arch_clear_user)
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@ -40,7 +40,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
ret
ENDPROC(__arch_clear_user)
SYM_FUNC_END(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)
.section .fixup,"ax"


@ -53,12 +53,12 @@
.endm
end .req x5
ENTRY(__arch_copy_from_user)
SYM_FUNC_START(__arch_copy_from_user)
add end, x0, x2
#include "copy_template.S"
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
.section .fixup,"ax"


@ -55,12 +55,12 @@
end .req x5
ENTRY(__arch_copy_in_user)
SYM_FUNC_START(__arch_copy_in_user)
add end, x0, x2
#include "copy_template.S"
mov x0, #0
ret
ENDPROC(__arch_copy_in_user)
SYM_FUNC_END(__arch_copy_in_user)
EXPORT_SYMBOL(__arch_copy_in_user)
.section .fixup,"ax"


@ -17,7 +17,7 @@
* x0 - dest
* x1 - src
*/
ENTRY(copy_page)
SYM_FUNC_START(copy_page)
alternative_if ARM64_HAS_NO_HW_PREFETCH
// Prefetch three cache lines ahead.
prfm pldl1strm, [x1, #128]
@ -75,5 +75,5 @@ alternative_else_nop_endif
stnp x16, x17, [x0, #112 - 256]
ret
ENDPROC(copy_page)
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)


@ -52,12 +52,12 @@
.endm
end .req x5
ENTRY(__arch_copy_to_user)
SYM_FUNC_START(__arch_copy_to_user)
add end, x0, x2
#include "copy_template.S"
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
.section .fixup,"ax"


@ -85,17 +85,17 @@ CPU_BE( rev16 w3, w3 )
.endm
.align 5
ENTRY(crc32_le)
SYM_FUNC_START(crc32_le)
alternative_if_not ARM64_HAS_CRC32
b crc32_le_base
alternative_else_nop_endif
__crc32
ENDPROC(crc32_le)
SYM_FUNC_END(crc32_le)
.align 5
ENTRY(__crc32c_le)
SYM_FUNC_START(__crc32c_le)
alternative_if_not ARM64_HAS_CRC32
b __crc32c_le_base
alternative_else_nop_endif
__crc32 c
ENDPROC(__crc32c_le)
SYM_FUNC_END(__crc32c_le)


@ -19,7 +19,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
WEAK(memchr)
SYM_FUNC_START_WEAK_PI(memchr)
and w1, w1, #0xff
1: subs x2, x2, #1
b.mi 2f
@ -30,5 +30,5 @@ WEAK(memchr)
ret
2: mov x0, #0
ret
ENDPIPROC(memchr)
SYM_FUNC_END_PI(memchr)
EXPORT_SYMBOL_NOKASAN(memchr)


@ -46,7 +46,7 @@ pos .req x11
limit_wd .req x12
mask .req x13
WEAK(memcmp)
SYM_FUNC_START_WEAK_PI(memcmp)
cbz limit, .Lret0
eor tmp1, src1, src2
tst tmp1, #7
@ -243,5 +243,5 @@ CPU_LE( rev data2, data2 )
.Lret0:
mov result, #0
ret
ENDPIPROC(memcmp)
SYM_FUNC_END_PI(memcmp)
EXPORT_SYMBOL_NOKASAN(memcmp)


@ -56,12 +56,11 @@
stp \ptr, \regB, [\regC], \val
.endm
.weak memcpy
ENTRY(__memcpy)
ENTRY(memcpy)
SYM_FUNC_START_ALIAS(__memcpy)
SYM_FUNC_START_WEAK_PI(memcpy)
#include "copy_template.S"
ret
ENDPIPROC(memcpy)
SYM_FUNC_END_PI(memcpy)
EXPORT_SYMBOL(memcpy)
ENDPROC(__memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(__memcpy)


@ -45,9 +45,8 @@ C_h .req x12
D_l .req x13
D_h .req x14
.weak memmove
ENTRY(__memmove)
ENTRY(memmove)
SYM_FUNC_START_ALIAS(__memmove)
SYM_FUNC_START_WEAK_PI(memmove)
cmp dstin, src
b.lo __memcpy
add tmp1, src, count
@ -184,7 +183,7 @@ ENTRY(memmove)
tst count, #0x3f
b.ne .Ltail63
ret
ENDPIPROC(memmove)
SYM_FUNC_END_PI(memmove)
EXPORT_SYMBOL(memmove)
ENDPROC(__memmove)
SYM_FUNC_END_ALIAS(__memmove)
EXPORT_SYMBOL(__memmove)


@ -42,9 +42,8 @@ dst .req x8
tmp3w .req w9
tmp3 .req x9
.weak memset
ENTRY(__memset)
ENTRY(memset)
SYM_FUNC_START_ALIAS(__memset)
SYM_FUNC_START_WEAK_PI(memset)
mov dst, dstin /* Preserve return value. */
and A_lw, val, #255
orr A_lw, A_lw, A_lw, lsl #8
@ -203,7 +202,7 @@ ENTRY(memset)
ands count, count, zva_bits_x
b.ne .Ltail_maybe_long
ret
ENDPIPROC(memset)
SYM_FUNC_END_PI(memset)
EXPORT_SYMBOL(memset)
ENDPROC(__memset)
SYM_FUNC_END_ALIAS(__memset)
EXPORT_SYMBOL(__memset)


@ -18,7 +18,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
WEAK(strchr)
SYM_FUNC_START_WEAK(strchr)
and w1, w1, #0xff
1: ldrb w2, [x0], #1
cmp w2, w1
@ -28,5 +28,5 @@ WEAK(strchr)
cmp w2, w1
csel x0, x0, xzr, eq
ret
ENDPROC(strchr)
SYM_FUNC_END(strchr)
EXPORT_SYMBOL_NOKASAN(strchr)


@ -48,7 +48,7 @@ tmp3 .req x9
zeroones .req x10
pos .req x11
WEAK(strcmp)
SYM_FUNC_START_WEAK_PI(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
@ -219,5 +219,5 @@ CPU_BE( orr syndrome, diff, has_nul )
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
ENDPIPROC(strcmp)
SYM_FUNC_END_PI(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp)


@ -44,7 +44,7 @@ pos .req x12
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
WEAK(strlen)
SYM_FUNC_START_WEAK_PI(strlen)
mov zeroones, #REP8_01
bic src, srcin, #15
ands tmp1, srcin, #15
@ -111,5 +111,5 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
csinv data1, data1, xzr, le
csel data2, data2, data2a, le
b .Lrealigned
ENDPIPROC(strlen)
SYM_FUNC_END_PI(strlen)
EXPORT_SYMBOL_NOKASAN(strlen)


@ -52,7 +52,7 @@ limit_wd .req x13
mask .req x14
endloop .req x15
WEAK(strncmp)
SYM_FUNC_START_WEAK_PI(strncmp)
cbz limit, .Lret0
eor tmp1, src1, src2
mov zeroones, #REP8_01
@ -295,5 +295,5 @@ CPU_BE( orr syndrome, diff, has_nul )
.Lret0:
mov result, #0
ret
ENDPIPROC(strncmp)
SYM_FUNC_END_PI(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)


@ -47,7 +47,7 @@ limit_wd .req x14
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
WEAK(strnlen)
SYM_FUNC_START_WEAK_PI(strnlen)
cbz limit, .Lhit_limit
mov zeroones, #REP8_01
bic src, srcin, #15
@ -156,5 +156,5 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
.Lhit_limit:
mov len, limit
ret
ENDPIPROC(strnlen)
SYM_FUNC_END_PI(strnlen)
EXPORT_SYMBOL_NOKASAN(strnlen)


@ -18,7 +18,7 @@
* Returns:
* x0 - address of last occurrence of 'c' or 0
*/
WEAK(strrchr)
SYM_FUNC_START_WEAK_PI(strrchr)
mov x3, #0
and w1, w1, #0xff
1: ldrb w2, [x0], #1
@ -29,5 +29,5 @@ WEAK(strrchr)
b 1b
2: mov x0, x3
ret
ENDPIPROC(strrchr)
SYM_FUNC_END_PI(strrchr)
EXPORT_SYMBOL_NOKASAN(strrchr)


@ -7,7 +7,7 @@
#include <asm/assembler.h>
ENTRY(__ashlti3)
SYM_FUNC_START(__ashlti3)
cbz x2, 1f
mov x3, #64
sub x3, x3, x2
@ -26,10 +26,10 @@ ENTRY(__ashlti3)
lsl x1, x0, x1
mov x0, x2
ret
ENDPROC(__ashlti3)
SYM_FUNC_END(__ashlti3)
EXPORT_SYMBOL(__ashlti3)
ENTRY(__ashrti3)
SYM_FUNC_START(__ashrti3)
cbz x2, 1f
mov x3, #64
sub x3, x3, x2
@ -48,10 +48,10 @@ ENTRY(__ashrti3)
asr x0, x1, x0
mov x1, x2
ret
ENDPROC(__ashrti3)
SYM_FUNC_END(__ashrti3)
EXPORT_SYMBOL(__ashrti3)
ENTRY(__lshrti3)
SYM_FUNC_START(__lshrti3)
cbz x2, 1f
mov x3, #64
sub x3, x3, x2
@ -70,5 +70,5 @@ ENTRY(__lshrti3)
lsr x0, x1, x0
mov x1, x2
ret
ENDPROC(__lshrti3)
SYM_FUNC_END(__lshrti3)
EXPORT_SYMBOL(__lshrti3)


@ -13,9 +13,13 @@
#ifdef __ASSEMBLY__
#define GLOBAL(name) \
.globl name; \
name:
/*
* GLOBAL is DEPRECATED
*
* use SYM_DATA_START, SYM_FUNC_START, SYM_INNER_LABEL, SYM_CODE_START, or
* similar
*/
#define GLOBAL(name) SYM_ENTRY(name, SYM_L_GLOBAL, SYM_A_NONE)
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
#define __ALIGN .p2align 4, 0x90


@ -210,8 +210,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
/* Copying screen_info will do? */
memcpy(&params->screen_info, &boot_params.screen_info,
sizeof(struct screen_info));
memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info));
/* Fill in memsize later */
params->screen_info.ext_mem_k = 0;


@ -855,13 +855,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
goto fail;
}
if (radix_tree_preload(GFP_KERNEL)) {
blkg_free(new_blkg);
ret = -ENOMEM;
goto fail;
}
rcu_read_lock();
spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_check(pos, pol, q);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_unlock;
blkg_free(new_blkg);
goto fail_preloaded;
}
if (blkg) {
@ -870,10 +877,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
blkg = blkg_create(pos, q, new_blkg);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_unlock;
goto fail_preloaded;
}
}
radix_tree_preload_end();
if (pos == blkcg)
goto success;
}
@ -883,6 +892,8 @@ success:
ctx->body = input;
return 0;
fail_preloaded:
radix_tree_preload_end();
fail_unlock:
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();


@ -1553,7 +1553,7 @@ static ssize_t format1_show(struct device *dev,
le16_to_cpu(nfit_dcr->dcr->code));
break;
}
if (rc != ENXIO)
if (rc != -ENXIO)
break;
}
mutex_unlock(&acpi_desc->init_mutex);


@ -762,8 +762,7 @@ static void __device_link_del(struct kref *kref)
dev_dbg(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_drop_link(link->consumer);
pm_runtime_drop_link(link);
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
@ -777,8 +776,7 @@ static void __device_link_del(struct kref *kref)
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_drop_link(link->consumer);
pm_runtime_drop_link(link);
list_del(&link->s_node);
list_del(&link->c_node);


@ -1126,6 +1126,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
pm_runtime_get_sync(dev);
while (device_links_busy(dev)) {
__device_driver_unlock(dev, parent);
@ -1137,13 +1139,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
* have released the driver successfully while this one
* was waiting, so check for that.
*/
if (dev->driver != drv)
if (dev->driver != drv) {
pm_runtime_put(dev);
return;
}
}
pm_runtime_get_sync(dev);
pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
if (dev->bus)


@ -1615,42 +1615,6 @@ void pm_runtime_remove(struct device *dev)
pm_runtime_reinit(dev);
}
/**
* pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
* @dev: Device whose driver is going to be removed.
*
* Check links from this device to any consumers and if any of them have active
* runtime PM references to the device, drop the usage counter of the device
* (as many times as needed).
*
* Links with the DL_FLAG_MANAGED flag unset are ignored.
*
* Since the device is guaranteed to be runtime-active at the point this is
* called, nothing else needs to be done here.
*
* Moreover, this is called after device_links_busy() has returned 'false', so
* the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
* therefore rpm_active can't be manipulated concurrently.
*/
void pm_runtime_clean_up_links(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
device_links_read_lock_held()) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put_noidle(dev);
}
device_links_read_unlock(idx);
}
/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
@ -1702,7 +1666,7 @@ void pm_runtime_new_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
void pm_runtime_drop_link(struct device *dev)
static void pm_runtime_drop_link_count(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
@ -1710,6 +1674,25 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
/**
* pm_runtime_drop_link - Prepare for device link removal.
* @link: Device link going away.
*
* Drop the link count of the consumer end of @link and decrement the supplier
* device's runtime PM usage counter as many times as needed to drop all of the
* PM runtime reference to it from the consumer.
*/
void pm_runtime_drop_link(struct device_link *link)
{
if (!(link->flags & DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&


@ -174,7 +174,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
__skb_trim(skb, 0);
refcount_add(2, &skb->users);
refcount_inc(&skb->users);
} else {
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
}


@ -357,6 +357,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
if (ret)
goto out_notcb;
if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
goto out_notcb;
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
csk->wr_credits -= DIV_ROUND_UP(len, 16);
csk->wr_unacked += DIV_ROUND_UP(len, 16);


@ -1011,6 +1011,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},


@ -1574,6 +1574,9 @@ static void process_csb(struct intel_engine_cs *engine)
if (!inject_preempt_hang(execlists))
ring_set_paused(engine, 0);
/* XXX Magic delay for tgl */
ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
WRITE_ONCE(execlists->pending[0], NULL);
break;


@ -307,6 +307,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
cond_resched();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@ -392,6 +394,7 @@ static int compress_page(struct compress *c,
if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
cond_resched();
return 0;
}


@ -1124,6 +1124,18 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
spin_unlock(&uncore->debug->lock);
}
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
u##x val = __raw_uncore_read##x(uncore, reg); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(uncore->rpm);
@ -1327,6 +1339,16 @@ __gen_reg_write_funcs(gen8);
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_writeb = x##_write8; \
@ -1647,7 +1669,10 @@ static void uncore_raw_init(struct intel_uncore *uncore)
{
GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
if (IS_GEN(uncore->i915, 5)) {
if (intel_vgpu_active(uncore->i915)) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
} else if (IS_GEN(uncore->i915, 5)) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
} else {


@ -197,7 +197,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
drm_gem_object_release(&nvbo->bo.base);
kfree(nvbo);
return ret;
}


@ -112,11 +112,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
unsigned target, cmd, priority;
unsigned long addr, end, size;
unsigned long addr, end;
struct mm_struct *mm;
args->va_start &= PAGE_MASK;
args->va_end &= PAGE_MASK;
args->va_end = ALIGN(args->va_end, PAGE_SIZE);
/* Sanity check arguments */
if (args->reserved0 || args->reserved1)
@ -125,8 +125,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
if (args->va_start >= args->va_end)
return -EINVAL;
if (!args->npages)
return -EINVAL;
cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
@ -158,12 +156,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
if (args->stride)
return -EINVAL;
size = ((unsigned long)args->npages) << PAGE_SHIFT;
if ((args->va_start + size) <= args->va_start)
return -EINVAL;
if ((args->va_start + size) > args->va_end)
return -EINVAL;
/*
* Ok we are ask to do something sane, for now we only support migrate
* commands but we will add things like memory policy (what to do on
@ -178,7 +170,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
}
for (addr = args->va_start, end = args->va_start + size; addr < end;) {
for (addr = args->va_start, end = args->va_end; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;


@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
struct panfrost_gem_mapping *mapping;
mutex_lock(&bo->mappings.lock);
list_for_each_entry(mapping, &bo->mappings.list, node)
panfrost_gem_teardown_mapping(mapping);
mutex_unlock(&bo->mappings.lock);
}
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)


@ -82,7 +82,7 @@ struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);


@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
bool ret = false;
if (atomic_read(&bo->gpu_usecount))
return false;
if (!mutex_trylock(&shmem->pages_lock))
if (!mutex_trylock(&bo->mappings.lock))
return false;
panfrost_gem_teardown_mappings(bo);
if (!mutex_trylock(&shmem->pages_lock))
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
drm_gem_shmem_purge_locked(obj);
ret = true;
mutex_unlock(&shmem->pages_lock);
return true;
unlock_mappings:
mutex_unlock(&bo->mappings.lock);
return ret;
}
static unsigned long


@ -407,6 +407,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
uint64_t modifier = fb->modifier;
unsigned int ch1_phase_idx;
u32 out_fmt_val;
u32 in_fmt_val, in_mod_val, in_ps_val;
unsigned int i;
@ -442,18 +443,19 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
* I have no idea what this does exactly, but it seems to be
* related to the scaler FIR filter phase parameters.
*/
ch1_phase_idx = (format->num_planes > 1) ? 1 : 0;
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG,
frontend->data->ch_phase[0].horzphase);
frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG,
frontend->data->ch_phase[1].horzphase);
frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG,
frontend->data->ch_phase[0].vertphase[0]);
frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG,
frontend->data->ch_phase[1].vertphase[0]);
frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG,
frontend->data->ch_phase[0].vertphase[1]);
frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG,
frontend->data->ch_phase[1].vertphase[1]);
frontend->data->ch_phase[ch1_phase_idx]);
/*
* Checking the input format is sufficient since we currently only
@ -687,30 +689,12 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = {
};
static const struct sun4i_frontend_data sun4i_a10_frontend = {
.ch_phase = {
{
.horzphase = 0,
.vertphase = { 0, 0 },
},
{
.horzphase = 0xfc000,
.vertphase = { 0xfc000, 0xfc000 },
},
},
.ch_phase = { 0x000, 0xfc000 },
.has_coef_rdy = true,
};
static const struct sun4i_frontend_data sun8i_a33_frontend = {
.ch_phase = {
{
.horzphase = 0x400,
.vertphase = { 0x400, 0x400 },
},
{
.horzphase = 0x400,
.vertphase = { 0x400, 0x400 },
},
},
.ch_phase = { 0x400, 0xfc400 },
.has_coef_access_ctrl = true,
};


@ -115,11 +115,7 @@ struct reset_control;
struct sun4i_frontend_data {
bool has_coef_access_ctrl;
bool has_coef_rdy;
struct {
u32 horzphase;
u32 vertphase[2];
} ch_phase[2];
u32 ch_phase[2];
};
struct sun4i_frontend {


@ -309,6 +309,7 @@ unbind_all:
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
drm_mode_config_cleanup(drm);
vc4_bo_cache_destroy(drm);
dev_put:
drm_dev_put(drm);


@ -147,8 +147,7 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *
coresight_get_enabled_sink(struct coresight_device *source);
struct coresight_device *coresight_get_enabled_sink(bool reset);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);


@ -481,46 +481,50 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
static struct coresight_device *
coresight_find_enabled_sink(struct coresight_device *csdev)
static int coresight_enabled_sink(struct device *dev, const void *data)
{
int i;
struct coresight_device *sink;
const bool *reset = data;
struct coresight_device *csdev = to_coresight_device(dev);
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
csdev->activated)
return csdev;
csdev->activated) {
/*
* Now that we have a handle on the sink for this session,
* disable the sysFS "enable_sink" flag so that possible
* concurrent perf session that wish to use another sink don't
* trip on it. Doing so has no ramification for the current
* session.
*/
if (*reset)
csdev->activated = false;
/*
* Recursively explore each port found on this element.
*/
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_device *child_dev;
child_dev = csdev->pdata->conns[i].child_dev;
if (child_dev)
sink = coresight_find_enabled_sink(child_dev);
if (sink)
return sink;
return 1;
}
return NULL;
return 0;
}
/**
* coresight_get_enabled_sink - returns the first enabled sink using
* connection based search starting from the source reference
* coresight_get_enabled_sink - returns the first enabled sink found on the bus
* @deactivate: Whether the 'enable_sink' flag should be reset
*
* @source: Coresight source device reference
* When operated from perf the deactivate parameter should be set to 'true'.
* That way the "enabled_sink" flag of the sink that was selected can be reset,
* allowing for other concurrent perf sessions to choose a different sink.
*
* When operated from sysFS users have full control and as such the deactivate
* parameter should be set to 'false', hence mandating users to explicitly
* clear the flag.
*/
struct coresight_device *
coresight_get_enabled_sink(struct coresight_device *source)
struct coresight_device *coresight_get_enabled_sink(bool deactivate)
{
if (!source)
return NULL;
struct device *dev = NULL;
return coresight_find_enabled_sink(source);
dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
coresight_enabled_sink);
return dev ? to_coresight_device(dev) : NULL;
}
static int coresight_sink_by_id(struct device *dev, const void *data)
@ -760,7 +764,11 @@ int coresight_enable(struct coresight_device *csdev)
goto out;
}
sink = coresight_get_enabled_sink(csdev);
/*
* Search for a valid sink for this session but don't reset the
* "enable_sink" flag in sysFS. Users get to do that explicitly.
*/
sink = coresight_get_enabled_sink(false);
if (!sink) {
ret = -EINVAL;
goto out;


@ -4444,11 +4444,10 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor)
memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
if (spi_nor_parse_sfdp(nor, &nor->params)) {
memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
} else {
memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
}
}


@ -1718,7 +1718,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);


@ -1826,20 +1826,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, fcb_len);
if (!skb_new) {
if (fcb_len) {
if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(skb_new, skb->sk);
dev_consume_skb_any(skb);
skb = skb_new;
}
/* total number of fragments in the SKB */
@ -3377,7 +3369,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {


@ -1109,18 +1109,27 @@ static int ibmvnic_open(struct net_device *netdev)
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc)
return rc;
goto out;
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
return rc;
goto out;
}
}
rc = __ibmvnic_open(netdev);
out:
/*
* If open fails due to a pending failover, set device state and
* return. Device operation will be handled by reset routine.
*/
if (rc && adapter->failover_pending) {
adapter->state = VNIC_OPEN;
rc = 0;
}
return rc;
}
@ -1842,6 +1851,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rwi->reset_reason);
rtnl_lock();
/*
* Now that we have the rtnl lock, clear any pending failover.
* This will ensure ibmvnic_open() has either completed or will
* block until failover is complete.
*/
if (rwi->reset_reason == VNIC_RESET_FAILOVER)
adapter->failover_pending = false;
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
@ -2112,6 +2128,13 @@ static void __ibmvnic_reset(struct work_struct *work)
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset(adapter, rwi, reset_state);
} else if (adapter->force_reset_recovery) {
/*
* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
* future MOBILITY or other resets.
*/
adapter->failover_pending = false;
/* Transport event occurred during previous reset */
if (adapter->wait_for_reset) {
/* Previous was CHANGE_PARAM; caller locked */
@ -2176,9 +2199,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
/*
* If failover is pending don't schedule any other reset.
* Instead let the failover complete. If there is already a
* a failover reset scheduled, we will detect and drop the
* duplicate reset when walking the ->rwi_list below.
*/
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED ||
adapter->failover_pending) {
(adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
ret = EBUSY;
netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
goto err;
@ -4532,7 +4561,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
adapter->failover_pending = false;
if (!completion_done(&adapter->init_done)) {
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;


@ -125,6 +125,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(ks, supported);
if (!idev->port_info) {
netdev_err(netdev, "port_info not initialized\n");
return -EOPNOTSUPP;
}
/* The port_info data is found in a DMA space that the NIC keeps
* up-to-date, so there's no need to request the data from the
* NIC, we already have it in our memory space.


@ -1970,7 +1970,8 @@ static int sfp_probe(struct platform_device *pdev)
continue;
sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
if (!sfp->gpio_irq[i]) {
if (sfp->gpio_irq[i] < 0) {
sfp->gpio_irq[i] = 0;
poll = true;
continue;
}


@ -1331,6 +1331,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */


@ -1520,6 +1520,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
/* sanity checking for received data length */
if (unlikely(wc->byte_len < len)) {
dev_err(queue->ctrl->ctrl.device,
"Unexpected nvme completion length(%d)\n", wc->byte_len);
nvme_rdma_error_recovery(queue->ctrl);
return;
}
ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
/*
* AEN requests are special as they don't time out and can


@ -878,8 +878,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
trace_nvmet_req_init(req, req->cmd);
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
req->error_loc = offsetof(struct nvme_common_command, flags);
@ -913,6 +911,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
if (status)
goto fail;
trace_nvmet_req_init(req, req->cmd);
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;


@ -46,19 +46,12 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
return req->sq->ctrl;
}
static inline void __assign_disk_name(char *name, struct nvmet_req *req,
bool init)
static inline void __assign_req_name(char *name, struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
struct nvmet_ns *ns;
if ((init && req->sq->qid) || (!init && req->cq->qid)) {
ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
strncpy(name, ns->device_path, DISK_NAME_LEN);
return;
}
memset(name, 0, DISK_NAME_LEN);
if (req->ns)
strncpy(name, req->ns->device_path, DISK_NAME_LEN);
else
memset(name, 0, DISK_NAME_LEN);
}
#endif
@ -81,7 +74,7 @@ TRACE_EVENT(nvmet_req_init,
TP_fast_assign(
__entry->cmd = cmd;
__entry->ctrl = nvmet_req_to_ctrl(req);
__assign_disk_name(__entry->disk, req, true);
__assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
__entry->opcode = cmd->common.opcode;
@ -121,7 +114,7 @@ TRACE_EVENT(nvmet_req_complete,
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
__entry->status = le16_to_cpu(req->cqe->status) >> 1;
__assign_disk_name(__entry->disk, req, false);
__assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
__print_ctrl_name(__entry->ctrl),


@ -200,6 +200,16 @@ static int __init __rmem_cmp(const void *a, const void *b)
if (ra->base > rb->base)
return 1;
/*
* Put the dynamic allocations (address == 0, size == 0) before static
* allocations at address 0x0 so that overlap detection works
* correctly.
*/
if (ra->size < rb->size)
return -1;
if (ra->size > rb->size)
return 1;
return 0;
}
@ -217,8 +227,7 @@ static void __init __rmem_check_for_overlap(void)
this = &reserved_mem[i];
next = &reserved_mem[i + 1];
if (!(this->base && next->base))
continue;
if (this->base + this->size > next->base) {
phys_addr_t this_end, next_end;


@ -4049,6 +4049,8 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
ret = rdev->desc->fixed_uV;
} else if (rdev->supply) {
ret = regulator_get_voltage_rdev(rdev->supply->rdev);
} else if (rdev->supply_name) {
return -EPROBE_DEFER;
} else {
return -EINVAL;
}


@ -33,9 +33,6 @@ MODULE_DESCRIPTION("s390 protected key interface");
#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */
/* mask of available pckmo subfunctions, fetched once at module init */
static cpacf_mask_t pckmo_functions;
/*
* debug feature data and functions
*/
@ -78,6 +75,9 @@ static int pkey_clr2protkey(u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_protkey *protkey)
{
/* mask of available pckmo subfunctions */
static cpacf_mask_t pckmo_functions;
long fc;
int keysize;
u8 paramblock[64];
@ -101,11 +101,13 @@ static int pkey_clr2protkey(u32 keytype,
return -EINVAL;
}
/*
* Check if the needed pckmo subfunction is available.
* These subfunctions can be enabled/disabled by customers
* in the LPAR profile or may even change on the fly.
*/
/* Did we already check for PCKMO ? */
if (!pckmo_functions.bytes[0]) {
/* no, so check now */
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
return -ENODEV;
}
/* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
return -ENODEV;
@ -1504,7 +1506,7 @@ static struct miscdevice pkey_dev = {
*/
static int __init pkey_init(void)
{
cpacf_mask_t kmc_functions;
cpacf_mask_t func_mask;
/*
* The pckmo instruction should be available - even if we don't
@ -1512,15 +1514,15 @@ static int __init pkey_init(void)
* is also the minimum level for the kmc instructions which
* are able to work with protected keys.
*/
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
if (!cpacf_query(CPACF_PCKMO, &func_mask))
return -ENODEV;
/* check for kmc instructions available */
if (!cpacf_query(CPACF_KMC, &kmc_functions))
if (!cpacf_query(CPACF_KMC, &func_mask))
return -ENODEV;
if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
!cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
!cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
return -ENODEV;
pkey_debug_init();


@@ -806,6 +806,22 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 }
 
+/**
+ * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
+ * an adapter failure, reset, or SRP Login. Done under host lock to prevent
+ * race with SCSI command submission.
+ * @hostdata:	adapter to adjust
+ * @limit:	new request limit
+ */
+static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	atomic_set(&hostdata->request_limit, limit);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
 /**
  * ibmvscsi_reset_host - Reset the connection to the server
  * @hostdata:	struct ibmvscsi_host_data to reset
@@ -813,7 +829,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
 {
 	scsi_block_requests(hostdata->host);
-	atomic_set(&hostdata->request_limit, 0);
+	ibmvscsi_set_request_limit(hostdata, 0);
 
 	purge_requests(hostdata, DID_ERROR);
 	hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
@@ -1146,13 +1162,13 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
 			 evt_struct->xfer_iu->srp.login_rej.reason);
 		/* Login failed. */
-		atomic_set(&hostdata->request_limit, -1);
+		ibmvscsi_set_request_limit(hostdata, -1);
 		return;
 	default:
 		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
 			evt_struct->xfer_iu->srp.login_rsp.opcode);
 		/* Login failed. */
-		atomic_set(&hostdata->request_limit, -1);
+		ibmvscsi_set_request_limit(hostdata, -1);
 		return;
 	}
@@ -1163,7 +1179,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	 * This value is set rather than added to request_limit because
 	 * request_limit could have been set to -1 by this client.
 	 */
-	atomic_set(&hostdata->request_limit,
+	ibmvscsi_set_request_limit(hostdata,
 		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
 
 	/* If we had any pending I/Os, kick them */
@@ -1195,13 +1211,13 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 					 SRP_BUF_FORMAT_INDIRECT);
 
-	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 0, since this is negotiated in
 	 * the login request we are just sending and login requests always
 	 * get sent by the driver regardless of request_limit.
 	 */
-	atomic_set(&hostdata->request_limit, 0);
+	ibmvscsi_set_request_limit(hostdata, 0);
 
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	dev_info(hostdata->dev, "sent SRP login\n");
@@ -1781,7 +1797,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		return;
 	case VIOSRP_CRQ_XPORT_EVENT:	/* Hypervisor telling us the connection is closed */
 		scsi_block_requests(hostdata->host);
-		atomic_set(&hostdata->request_limit, 0);
+		ibmvscsi_set_request_limit(hostdata, 0);
 		if (crq->format == 0x06) {
 			/* We need to re-setup the interpartition connection */
 			dev_info(hostdata->dev, "Re-enabling adapter!\n");
@@ -2137,12 +2153,12 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
 	}
 	hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 
 	if (rc) {
-		atomic_set(&hostdata->request_limit, -1);
+		ibmvscsi_set_request_limit(hostdata, -1);
 		dev_err(hostdata->dev, "error after %s\n", action);
 	}
-	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 
 	scsi_unblock_requests(hostdata->host);
 }
@@ -2226,7 +2242,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	init_waitqueue_head(&hostdata->work_wait_q);
 	hostdata->host = host;
 	hostdata->dev = dev;
-	atomic_set(&hostdata->request_limit, -1);
+	ibmvscsi_set_request_limit(hostdata, -1);
 	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
 	if (map_persist_bufs(hostdata)) {

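The ibmvscsi hunks above all make the same move: every bare atomic_set() on request_limit becomes a helper that takes the host lock, so an adapter reset can no longer race with a submission path that samples the limit under that same lock. A rough userspace sketch of the discipline, with invented names and a pthread mutex standing in for the host lock (not the driver code):

/*
 * Minimal sketch: a limit that is tested-and-consumed under a lock
 * must also be *set* under that lock, or a reset races with submission.
 * All names here are made up for illustration.
 */
#include <pthread.h>
#include <stdio.h>

struct host {
	pthread_mutex_t lock;
	int request_limit;
};

/* mirrors the shape of ibmvscsi_set_request_limit(): update under lock */
static void host_set_request_limit(struct host *h, int limit)
{
	pthread_mutex_lock(&h->lock);
	h->request_limit = limit;
	pthread_mutex_unlock(&h->lock);
}

/* submission path: check and consume the limit under the same lock */
static int host_try_submit(struct host *h)
{
	int ok;

	pthread_mutex_lock(&h->lock);
	ok = h->request_limit > 0;
	if (ok)
		h->request_limit--;
	pthread_mutex_unlock(&h->lock);
	return ok;
}

int main(void)
{
	struct host h = { PTHREAD_MUTEX_INITIALIZER, 0 };

	host_set_request_limit(&h, 2);
	printf("submit: %d %d %d\n", host_try_submit(&h),
	       host_try_submit(&h), host_try_submit(&h)); /* 1 1 0 */
	return 0;
}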

@@ -1715,15 +1715,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
  */
 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 {
-	struct async_scan_data *data;
+	struct async_scan_data *data = NULL;
 	unsigned long flags;
 
 	if (strncmp(scsi_scan_type, "sync", 4) == 0)
 		return NULL;
 
+	mutex_lock(&shost->scan_mutex);
 	if (shost->async_scan) {
 		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
-		return NULL;
+		goto err;
 	}
 
 	data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -1734,7 +1735,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 		goto err;
 	init_completion(&data->prev_finished);
 
-	mutex_lock(&shost->scan_mutex);
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->async_scan = 1;
 	spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1749,6 +1749,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 	return data;
 
  err:
+	mutex_unlock(&shost->scan_mutex);
 	kfree(data);
 	return NULL;
 }

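The scan fix above is a textbook check-then-act repair: the shost->async_scan test and the later assignment used to sit in separate critical sections, so two concurrent scanners could both pass the test. Moving mutex_lock() above the test and funneling every failure through one unlocking label closes the window. A hedged sketch of the shape (illustrative names only, pthreads in place of the kernel mutex):

/*
 * Sketch: the flag test and the flag set must share one critical
 * section, and every exit path must release the mutex exactly once.
 */
#include <pthread.h>
#include <stdlib.h>

struct scanner {
	pthread_mutex_t scan_mutex;
	int async_scan;
};

static void *prep_scan(struct scanner *s)
{
	void *data = NULL;

	pthread_mutex_lock(&s->scan_mutex);
	if (s->async_scan)		/* checked under the lock... */
		goto err;

	data = malloc(64);
	if (!data)
		goto err;

	s->async_scan = 1;		/* ...and set under the same lock */
	pthread_mutex_unlock(&s->scan_mutex);
	return data;

err:
	pthread_mutex_unlock(&s->scan_mutex);
	free(data);			/* free(NULL) is a safe no-op */
	return NULL;
}

int main(void)
{
	struct scanner s = { PTHREAD_MUTEX_INITIALIZER, 0 };
	void *d1 = prep_scan(&s);	/* succeeds */
	void *d2 = prep_scan(&s);	/* rejected: scan already running */
	free(d1);
	return d2 ? 1 : 0;
}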

@@ -1245,18 +1245,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
 	if (!chip)
 		return 0;
 
-	/*
-	 * Retrieve the corresponding GPIO line used for CS.
-	 * The inversion semantics will be handled by the GPIO core
-	 * code, so we pass GPIOS_OUT_LOW for "unasserted" and
-	 * the correct flag for inversion semantics. The SPI_CS_HIGH
-	 * on spi->mode cannot be checked for polarity in this case
-	 * as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
-	 */
-	if (of_property_read_bool(spi->dev.of_node, "spi-cs-high"))
-		lflags = GPIO_ACTIVE_HIGH;
-	else
-		lflags = GPIO_ACTIVE_LOW;
 	spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
 						  DRV_NAME,
 						  lflags,


@@ -316,7 +316,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
 	 */
 	baud = tty_termios_baud_rate(termios);
 
-	serial8250_do_set_termios(port, termios, old);
+	serial8250_do_set_termios(port, termios, NULL);
 
 	tty_termios_encode_baud_rate(termios, baud, baud);


@@ -238,6 +238,7 @@ static DEFINE_IDA(fsl_lpuart_ida);
 enum lpuart_type {
 	VF610_LPUART,
 	LS1021A_LPUART,
+	LS1028A_LPUART,
 	IMX7ULP_LPUART,
 	IMX8QXP_LPUART,
 };
@@ -282,11 +283,16 @@ static const struct lpuart_soc_data vf_data = {
 	.iotype = UPIO_MEM,
 };
 
-static const struct lpuart_soc_data ls_data = {
+static const struct lpuart_soc_data ls1021a_data = {
 	.devtype = LS1021A_LPUART,
 	.iotype = UPIO_MEM32BE,
 };
 
+static const struct lpuart_soc_data ls1028a_data = {
+	.devtype = LS1028A_LPUART,
+	.iotype = UPIO_MEM32,
+};
+
 static struct lpuart_soc_data imx7ulp_data = {
 	.devtype = IMX7ULP_LPUART,
 	.iotype = UPIO_MEM32,
@@ -301,7 +307,8 @@ static struct lpuart_soc_data imx8qxp_data = {
 static const struct of_device_id lpuart_dt_ids[] = {
 	{ .compatible = "fsl,vf610-lpuart",	.data = &vf_data, },
-	{ .compatible = "fsl,ls1021a-lpuart",	.data = &ls_data, },
+	{ .compatible = "fsl,ls1021a-lpuart",	.data = &ls1021a_data, },
+	{ .compatible = "fsl,ls1028a-lpuart",	.data = &ls1028a_data, },
 	{ .compatible = "fsl,imx7ulp-lpuart",	.data = &imx7ulp_data, },
 	{ .compatible = "fsl,imx8qxp-lpuart",	.data = &imx8qxp_data, },
 	{ /* sentinel */ }
@@ -311,6 +318,12 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
 /* Forward declare this for the dma callbacks*/
 static void lpuart_dma_tx_complete(void *arg);
 
+static inline bool is_layerscape_lpuart(struct lpuart_port *sport)
+{
+	return (sport->devtype == LS1021A_LPUART ||
+		sport->devtype == LS1028A_LPUART);
+}
+
 static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
 {
 	return sport->devtype == IMX8QXP_LPUART;
@@ -1553,6 +1566,17 @@ static int lpuart32_startup(struct uart_port *port)
 	sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
 					    UARTFIFO_FIFOSIZE_MASK);
 
+	/*
+	 * The LS1021A and LS1028A have a fixed FIFO depth of 16 words.
+	 * Although they support the RX/TXSIZE fields, their encoding is
+	 * different. Eg the reference manual states 0b101 is 16 words.
+	 */
+	if (is_layerscape_lpuart(sport)) {
+		sport->rxfifo_size = 16;
+		sport->txfifo_size = 16;
+		sport->port.fifosize = sport->txfifo_size;
+	}
+
 	spin_lock_irqsave(&sport->port.lock, flags);
 	lpuart32_setup_watermark_enable(sport);


@@ -1283,6 +1283,9 @@ static int __init serial_txx9_init(void)
 
 #ifdef ENABLE_SERIAL_TXX9_PCI
 	ret = pci_register_driver(&serial_txx9_pci_driver);
+	if (ret) {
+		platform_driver_unregister(&serial_txx9_plat_driver);
+	}
 #endif
 	if (ret == 0)
 		goto out;


@@ -4620,27 +4620,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
 	return rc;
 }
 
-static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
-{
-	int con = op->height;
-	int rc;
-
-	console_lock();
-	if (vc->vc_mode != KD_TEXT)
-		rc = -EINVAL;
-	else if (!vc->vc_sw->con_font_copy)
-		rc = -ENOSYS;
-	else if (con < 0 || !vc_cons_allocated(con))
-		rc = -ENOTTY;
-	else if (con == vc->vc_num)	/* nothing to do */
-		rc = 0;
-	else
-		rc = vc->vc_sw->con_font_copy(vc, con);
-	console_unlock();
-	return rc;
-}
-
 int con_font_op(struct vc_data *vc, struct console_font_op *op)
 {
 	switch (op->op) {
@@ -4651,7 +4630,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
 	case KD_FONT_OP_SET_DEFAULT:
 		return con_font_default(vc, op);
 	case KD_FONT_OP_COPY:
-		return con_font_copy(vc, op);
+		/* was buggy and never really used */
+		return -EINVAL;
 	}
 	return -ENOSYS;
 }


@@ -1057,7 +1057,7 @@ struct cdns3_trb {
 #define TRB_TDL_SS_SIZE_GET(p)	(((p) & GENMASK(23, 17)) >> 17)
 
 /* transfer_len bitmasks - bits 31:24 */
-#define TRB_BURST_LEN(p)	(((p) << 24) & GENMASK(31, 24))
+#define TRB_BURST_LEN(p)	((unsigned int)((p) << 24) & GENMASK(31, 24))
 #define TRB_BURST_LEN_GET(p)	(((p) & GENMASK(31, 24)) >> 24)
 
 /* Data buffer pointer bitmasks*/

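The cdns3 one-liner is about C's integer promotions: the burst length is a u8, it is promoted to signed int before the shift, and once bit 7 is set the shifted value is negative and sign-extends when widened. The (unsigned int) cast truncates first. A standalone demo of the difference (not driver code; shifting into the sign bit is formally undefined in ISO C, but the compilers the kernel supports produce the two's-complement value shown):

/* Demonstrates the implicit sign extension the cdns3 cast suppresses. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t burst = 0x80;	/* promoted to signed int before the shift */

	uint64_t without_cast = burst << 24;			/* sign-extends */
	uint64_t with_cast    = (unsigned int)(burst << 24);	/* does not */

	printf("without cast: 0x%016llx\n", (unsigned long long)without_cast);
	printf("with cast:    0x%016llx\n", (unsigned long long)with_cast);
	/* prints 0xffffffff80000000 vs 0x0000000080000000 */
	return 0;
}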

@@ -378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
+	/* Kingston DataTraveler 3.0 */
+	{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
 	{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },


@@ -1058,10 +1058,11 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
 {
 	unsigned int direction = !dwc->ep0_expect_in;
 
+	dwc->delayed_status = false;
+
 	if (dwc->ep0state != EP0_STATUS_PHASE)
 		return;
 
-	dwc->delayed_status = false;
 	__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
 }


@@ -587,6 +587,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
 
 	spin_unlock_irqrestore(&mtu->lock, flags);
 
+	synchronize_irq(mtu->irq);
 	return 0;
 }


@@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
 	struct device *dev = &port->dev;
 	int status = urb->status;
 	unsigned long flags;
+	bool resubmitted = false;
 
-	set_bit(0, &port->write_urbs_free);
 	if (status) {
 		dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
 			__func__, status);
+		set_bit(0, &port->write_urbs_free);
 		return;
 	}
 
@@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
 			goto exit;
 		}
 
+		resubmitted = true;
+
 		dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
 		dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
 
@@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
 
 exit:
 	spin_unlock_irqrestore(&priv->lock, flags);
+	if (!resubmitted)
+		set_bit(0, &port->write_urbs_free);
 	usb_serial_port_softint(port);
 }


@@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EP06			0x0306
 #define QUECTEL_PRODUCT_EM12			0x0512
 #define QUECTEL_PRODUCT_RM500Q			0x0800
+#define QUECTEL_PRODUCT_EC200T			0x6026
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -1117,6 +1118,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
 	  .driver_info = ZLP },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
 
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1189,6 +1191,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff),	/* Telit FT980-KS */
 	  .driver_info = NCTRL(2) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff),	/* Telit FN980 (PCIe) */
+	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1201,6 +1205,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = NCTRL(0) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff),	/* Telit LE910Cx (RNDIS) */
+	  .driver_info = NCTRL(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1215,6 +1221,10 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff),	/* Telit LE910Cx (rmnet) */
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff),	/* Telit LE910Cx (RNDIS) */
+	  .driver_info = NCTRL(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),


@@ -873,7 +873,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 out_free:
 	kfree(gl->gl_lksb.sb_lvbptr);
 	kmem_cache_free(cachep, gl);
-	atomic_dec(&sdp->sd_glock_disposal);
+	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+		wake_up(&sdp->sd_glock_wait);
 
 out:
 	return ret;

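The gfs2 error path decremented sd_glock_disposal but never woke the thread sleeping until the count reaches zero, so teardown could hang on the last failed glock. The fix is the classic "last decrementer wakes" pattern, sketched here in userspace with C11 atomics and a condition variable standing in for the wait queue (names invented):

/*
 * Sketch of "if (atomic_dec_and_test(...)) wake_up(...)": whoever
 * drops the count to zero must deliver the wakeup, on every path.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int glock_disposal = 2;
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;

static void put_glock(void)
{
	if (atomic_fetch_sub(&glock_disposal, 1) == 1) {	/* hit zero */
		pthread_mutex_lock(&m);
		pthread_cond_broadcast(&c);
		pthread_mutex_unlock(&m);
	}
}

static void *disposer(void *arg)
{
	(void)arg;
	put_glock();
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, disposer, NULL);
	pthread_create(&t2, NULL, disposer, NULL);

	pthread_mutex_lock(&m);			/* wait_event() analogue */
	while (atomic_load(&glock_disposal) != 0)
		pthread_cond_wait(&c, &m);
	pthread_mutex_unlock(&m);

	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("all glocks disposed");
	return 0;
}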

@@ -622,7 +622,6 @@ xfs_ioc_space(
 	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
 	if (error)
 		goto out_unlock;
-	inode_dio_wait(inode);
 
 	switch (bf->l_whence) {
 	case 0: /*SEEK_SET*/
@@ -668,6 +667,31 @@ xfs_ioc_space(
 		goto out_unlock;
 	}
 
+	/*
+	 * Must wait for all AIO to complete before we continue as AIO can
+	 * change the file size on completion without holding any locks we
+	 * currently hold. We must do this first because AIO can update both
+	 * the on disk and in memory inode sizes, and the operations that follow
+	 * require the in-memory size to be fully up-to-date.
+	 */
+	inode_dio_wait(inode);
+
+	/*
+	 * Now that AIO and DIO has drained we can flush and (if necessary)
+	 * invalidate the cached range over the first operation we are about to
+	 * run. We include zero range here because it starts with a hole punch
+	 * over the target range.
+	 */
+	switch (cmd) {
+	case XFS_IOC_ZERO_RANGE:
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_UNRESVSP64:
+		error = xfs_flush_unmap_range(ip, bf->l_start, bf->l_len);
+		if (error)
+			goto out_unlock;
+		break;
+	}
+
 	switch (cmd) {
 	case XFS_IOC_ZERO_RANGE:
 		flags |= XFS_PREALLOC_SET;


@@ -1159,10 +1159,6 @@ static inline bool arch_has_pfn_modify_check(void)
 
 #endif /* !__ASSEMBLY__ */
 
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
 #ifndef has_transparent_hugepage
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define has_transparent_hugepage() 1


@@ -75,32 +75,58 @@
 #ifdef __ASSEMBLY__
 
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC				STT_FUNC
+#endif
+
+/* SYM_T_OBJECT -- type used by assembler to mark data */
+#ifndef SYM_T_OBJECT
+#define SYM_T_OBJECT				STT_OBJECT
+#endif
+
+/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */
+#ifndef SYM_T_NONE
+#define SYM_T_NONE				STT_NOTYPE
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN				ALIGN
+#define SYM_A_NONE				/* nothing */
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name)			.globl name
+#define SYM_L_WEAK(name)			.weak name
+#define SYM_L_LOCAL(name)			/* nothing */
+
 #ifndef LINKER_SCRIPT
 #define ALIGN __ALIGN
 #define ALIGN_STR __ALIGN_STR
 
+/* === DEPRECATED annotations === */
+
 #ifndef GLOBAL
+/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
 #define GLOBAL(name) \
 	.globl name ASM_NL \
 	name:
 #endif
 
 #ifndef ENTRY
+/* deprecated, use SYM_FUNC_START */
 #define ENTRY(name) \
-	.globl name ASM_NL \
-	ALIGN ASM_NL \
-	name:
+	SYM_FUNC_START(name)
 #endif
 #endif /* LINKER_SCRIPT */
 
 #ifndef WEAK
+/* deprecated, use SYM_FUNC_START_WEAK* */
 #define WEAK(name) \
-	.weak name ASM_NL \
-	ALIGN ASM_NL \
-	name:
+	SYM_FUNC_START_WEAK(name)
 #endif
 
 #ifndef END
+/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
 #define END(name) \
 	.size name, .-name
 #endif
@@ -110,11 +136,214 @@
  * static analysis tools such as stack depth analyzer.
  */
 #ifndef ENDPROC
+/* deprecated, use SYM_FUNC_END */
 #define ENDPROC(name) \
-	.type name, @function ASM_NL \
-	END(name)
+	SYM_FUNC_END(name)
 #endif
 
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...) \
+	linkage(name) ASM_NL \
+	align ASM_NL \
+	name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...) \
+	SYM_ENTRY(name, linkage, align)
+#endif
+
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type) \
+	.type name sym_type ASM_NL \
+	.size name, .-name
+#endif
+
+/* === code annotations === */
+
+/*
+ * FUNC -- C-like functions (proper stack frame etc.)
+ * CODE -- non-C code (e.g. irq handlers with different, special stack etc.)
+ *
+ * Objtool validates stack for FUNC, but not for CODE.
+ * Objtool generates debug info for both FUNC & CODE, but needs special
+ * annotations for each CODE's start (to describe the actual stack frame).
+ *
+ * ALIAS -- does not generate debug info -- the aliased function will
+ */
+
+/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL_ALIGN
+#define SYM_INNER_LABEL_ALIGN(name, linkage) \
+	.type name SYM_T_NONE ASM_NL \
+	SYM_ENTRY(name, linkage, SYM_A_ALIGN)
+#endif
+
+/* SYM_INNER_LABEL -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL
+#define SYM_INNER_LABEL(name, linkage) \
+	.type name SYM_T_NONE ASM_NL \
+	SYM_ENTRY(name, linkage, SYM_A_NONE)
+#endif
+
+/*
+ * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_LOCAL_ALIAS
+#define SYM_FUNC_START_LOCAL_ALIAS(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_FUNC_START_ALIAS -- use where there are two global names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_ALIAS
+#define SYM_FUNC_START_ALIAS(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+/*
+ * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
+ * later.
+ */
+#define SYM_FUNC_START(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
+#ifndef SYM_FUNC_START_NOALIGN
+#define SYM_FUNC_START_NOALIGN(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_START_LOCAL(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
+#ifndef SYM_FUNC_START_LOCAL_NOALIGN
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_WEAK -- use for weak functions */
+#ifndef SYM_FUNC_START_WEAK
+#define SYM_FUNC_START_WEAK(name) \
+	SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
+#ifndef SYM_FUNC_START_WEAK_NOALIGN
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
+#ifndef SYM_FUNC_END_ALIAS
+#define SYM_FUNC_END_ALIAS(name) \
+	SYM_END(name, SYM_T_FUNC)
+#endif
+
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_END(name) \
+	SYM_END(name, SYM_T_FUNC)
+#endif
+
+/* SYM_CODE_START -- use for non-C (special) functions */
+#ifndef SYM_CODE_START
+#define SYM_CODE_START(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */
+#ifndef SYM_CODE_START_NOALIGN
+#define SYM_CODE_START_NOALIGN(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */
+#ifndef SYM_CODE_START_LOCAL
+#define SYM_CODE_START_LOCAL(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions,
+ * w/o alignment
+ */
+#ifndef SYM_CODE_START_LOCAL_NOALIGN
+#define SYM_CODE_START_LOCAL_NOALIGN(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... */
+#ifndef SYM_CODE_END
+#define SYM_CODE_END(name) \
+	SYM_END(name, SYM_T_NONE)
+#endif
+
+/* === data annotations === */
+
+/* SYM_DATA_START -- global data symbol */
+#ifndef SYM_DATA_START
+#define SYM_DATA_START(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_START_LOCAL -- local data symbol */
+#ifndef SYM_DATA_START_LOCAL
+#define SYM_DATA_START_LOCAL(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_END -- the end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END
+#define SYM_DATA_END(name) \
+	SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END_LABEL
+#define SYM_DATA_END_LABEL(name, linkage, label) \
+	linkage(label) ASM_NL \
+	.type label SYM_T_OBJECT ASM_NL \
+	label: \
+	SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA -- start+end wrapper around simple global data */
+#ifndef SYM_DATA
+#define SYM_DATA(name, data...) \
+	SYM_DATA_START(name) ASM_NL \
+	data ASM_NL \
+	SYM_DATA_END(name)
+#endif
+
+/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */
+#ifndef SYM_DATA_LOCAL
+#define SYM_DATA_LOCAL(name, data...) \
+	SYM_DATA_START_LOCAL(name) ASM_NL \
+	data ASM_NL \
+	SYM_DATA_END(name)
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _LINUX_LINKAGE_H */

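The new SYM_* family is plain preprocessor layering: SYM_FUNC_START resolves through SYM_START and SYM_ENTRY into a linkage directive, an alignment directive, and a label. The hosted C program below re-creates a simplified subset and prints what two annotations expand to. It is an illustration of the macro structure only; the .balign stand-in and all definitions here are simplified, and the named-variadic "align..." parameter is the same GNU extension the kernel header itself relies on.

/* Simplified, illustrative re-creation of the SYM_* layering. */
#include <stdio.h>

#define ASM_NL			;
#define SYM_A_ALIGN		.balign 4	/* stand-in for ALIGN */
#define SYM_L_GLOBAL(name)	.globl name
#define SYM_L_WEAK(name)	.weak name

#define SYM_ENTRY(name, linkage, align...) \
	linkage(name) ASM_NL align ASM_NL name:

#define SYM_START(name, linkage, align...) \
	SYM_ENTRY(name, linkage, align)

#define SYM_FUNC_START(name)      SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#define SYM_FUNC_START_WEAK(name) SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)

/* two-step stringify so the argument is fully macro-expanded first */
#define STR(x)	#x
#define SHOW(x)	puts(#x " expands to:\n\t" STR(x))

int main(void)
{
	SHOW(SYM_FUNC_START(memcpy));
	SHOW(SYM_FUNC_START_WEAK(memmove));
	return 0;
}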

@@ -2600,6 +2600,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+				     unsigned long addr, unsigned long pfn,
+				     unsigned long size, pgprot_t prot)
+{
+	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
 static inline vm_fault_t vmf_error(int err)
 {
 	if (err == -ENOMEM)


@@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
 extern void pm_runtime_update_max_time_suspended(struct device *dev,
 						 s64 delta_ns);
 extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
-extern void pm_runtime_clean_up_links(struct device *dev);
 extern void pm_runtime_get_suppliers(struct device *dev);
 extern void pm_runtime_put_suppliers(struct device *dev);
 extern void pm_runtime_new_link(struct device *dev);
-extern void pm_runtime_drop_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device_link *link);
 
 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
 {
@@ -173,11 +172,10 @@ static inline u64 pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 static inline void pm_runtime_set_memalloc_noio(struct device *dev,
 						bool enable){}
-static inline void pm_runtime_clean_up_links(struct device *dev) {}
 static inline void pm_runtime_get_suppliers(struct device *dev) {}
 static inline void pm_runtime_put_suppliers(struct device *dev) {}
 static inline void pm_runtime_new_link(struct device *dev) {}
-static inline void pm_runtime_drop_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device_link *link) {}
 
 #endif /* !CONFIG_PM */


@@ -9431,6 +9431,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
 				int fpos = token == IF_SRC_FILE ? 2 : 1;
 
+				kfree(filename);
 				filename = match_strdup(&args[fpos]);
 				if (!filename) {
 					ret = -ENOMEM;
@@ -9477,16 +9478,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			 */
 			ret = -EOPNOTSUPP;
 			if (!event->ctx->task)
-				goto fail_free_name;
+				goto fail;
 
 			/* look up the path and grab its inode */
 			ret = kern_path(filename, LOOKUP_FOLLOW,
 					&filter->path);
 			if (ret)
-				goto fail_free_name;
-
-			kfree(filename);
-			filename = NULL;
+				goto fail;
 
 			ret = -EINVAL;
 			if (!filter->path.dentry ||
@@ -9506,13 +9504,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 	if (state != IF_STATE_ACTION)
 		goto fail;
 
+	kfree(filename);
 	kfree(orig);
 
 	return 0;
 
-fail_free_name:
-	kfree(filename);
 fail:
+	kfree(filename);
 	free_filters_list(filters);
 	kfree(orig);

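The perf fix replaces a dedicated fail_free_name label with one fail label that frees filename unconditionally, relying on kfree(NULL) being a no-op, and frees the previous filename before each match_strdup() so repeated filter tokens cannot leak. A hedged userspace sketch of that cleanup shape (illustrative names, free() standing in for kfree()):

/*
 * Sketch: one cleanup label, pointers initialized to NULL, and
 * free(NULL) as a no-op make every failure path leak- and
 * double-free-safe without per-site labels.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_filter(const char *src, char **out)
{
	char *orig = NULL, *filename = NULL;

	orig = strdup(src);
	if (!orig)
		goto fail;

	free(filename);			/* safe even on the first pass */
	filename = strdup(orig);
	if (!filename)
		goto fail;

	*out = filename;
	free(orig);
	return 0;

fail:					/* one label frees everything */
	free(filename);
	free(orig);
	return -1;
}

int main(void)
{
	char *f;
	if (!parse_filter("file.so@0x1000", &f)) {
		printf("parsed: %s\n", f);
		free(f);
	}
	return 0;
}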

@@ -2112,14 +2112,9 @@ static __latent_entropy struct task_struct *copy_process(
 	/* ok, now we should be set up.. */
 	p->pid = pid_nr(pid);
 	if (clone_flags & CLONE_THREAD) {
-		p->exit_signal = -1;
 		p->group_leader = current->group_leader;
 		p->tgid = current->tgid;
 	} else {
-		if (clone_flags & CLONE_PARENT)
-			p->exit_signal = current->group_leader->exit_signal;
-		else
-			p->exit_signal = args->exit_signal;
 		p->group_leader = p;
 		p->tgid = p->pid;
 	}
@@ -2164,9 +2159,14 @@ static __latent_entropy struct task_struct *copy_process(
 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
 		p->real_parent = current->real_parent;
 		p->parent_exec_id = current->parent_exec_id;
+		if (clone_flags & CLONE_THREAD)
+			p->exit_signal = -1;
+		else
+			p->exit_signal = current->group_leader->exit_signal;
 	} else {
 		p->real_parent = current;
 		p->parent_exec_id = current->self_exec_id;
+		p->exit_signal = args->exit_signal;
 	}
 
 	klp_copy_process(p);


@@ -2515,10 +2515,22 @@ retry:
 		}
 
 		/*
-		 * Since we just failed the trylock; there must be an owner.
+		 * The trylock just failed, so either there is an owner or
+		 * there is a higher priority waiter than this one.
 		 */
 		newowner = rt_mutex_owner(&pi_state->pi_mutex);
-		BUG_ON(!newowner);
+		/*
+		 * If the higher priority waiter has not yet taken over the
+		 * rtmutex then newowner is NULL. We can't return here with
+		 * that state because it's inconsistent vs. the user space
+		 * state. So drop the locks and try again. It's a valid
+		 * situation and not any different from the other retry
+		 * conditions.
+		 */
+		if (unlikely(!newowner)) {
+			err = -EAGAIN;
+			goto handle_err;
+		}
 	} else {
 		WARN_ON_ONCE(argowner != current);
 		if (oldowner == current) {

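The futex change turns a BUG_ON into a retry: between a failed trylock and the owner read there is a legal window where the rtmutex has no owner, because a higher-priority waiter has not finished taking it over, and the only safe reaction is to drop the locks and try again. A minimal sketch of treating a transiently NULL owner as -EAGAIN (invented names, C11 atomics, not the futex code):

#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

struct task { const char *name; };

static _Atomic(struct task *) mutex_owner;	/* NULL = mid-takeover */

static int fixup_owner(struct task **newowner)
{
	struct task *owner = atomic_load(&mutex_owner);

	if (!owner)		/* transient state, not a bug */
		return -EAGAIN;	/* caller drops locks and retries */

	*newowner = owner;
	return 0;
}

int main(void)
{
	struct task t = { "waiter" };
	struct task *o;

	printf("empty: %d\n", fixup_owner(&o));		/* -EAGAIN */
	atomic_store(&mutex_owner, &t);
	if (!fixup_owner(&o))
		printf("owner: %s\n", o->name);
	return 0;
}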

@@ -873,7 +873,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	/* Move the work from worker->delayed_work_list. */
 	WARN_ON_ONCE(list_empty(&work->node));
 	list_del_init(&work->node);
-	kthread_insert_work(worker, work, &worker->work_list);
+	if (!work->canceling)
+		kthread_insert_work(worker, work, &worker->work_list);
 
 	raw_spin_unlock_irqrestore(&worker->lock, flags);
 }

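The kthread_worker fix makes the timer callback re-check, under the worker lock, whether the work is being canceled before requeueing it; the cancel path bumps a canceling count while it waits for the timer to finish. The sketch below shows the shape with a pthread mutex in place of the worker spinlock (userspace model, not the kernel code):

#include <pthread.h>
#include <stdio.h>

struct delayed_work {
	pthread_mutex_t *worker_lock;
	int canceling;		/* bumped by the cancel path */
	int queued;
};

static void timer_fn(struct delayed_work *work)
{
	pthread_mutex_lock(work->worker_lock);
	if (!work->canceling)	/* the fix: never requeue a dying work */
		work->queued = 1;
	pthread_mutex_unlock(work->worker_lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct delayed_work w = { &lock, 0, 0 };

	w.canceling = 1;	/* cancel wins the race */
	timer_fn(&w);
	printf("queued after cancel: %d\n", w.queued);	/* 0 */
	return 0;
}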

@@ -391,16 +391,17 @@ static bool task_participate_group_stop(struct task_struct *task)
 
 void task_join_group_stop(struct task_struct *task)
 {
-	/* Have the new thread join an on-going signal group stop */
-	unsigned long jobctl = current->jobctl;
-
-	if (jobctl & JOBCTL_STOP_PENDING) {
-		struct signal_struct *sig = current->signal;
-		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
-		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
-		if (task_set_jobctl_pending(task, signr | gstop)) {
-			sig->group_stop_count++;
-		}
-	}
+	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
+	struct signal_struct *sig = current->signal;
+
+	if (sig->group_stop_count) {
+		sig->group_stop_count++;
+		mask |= JOBCTL_STOP_CONSUME;
+	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
+		return;
+
+	/* Have the new thread join an on-going signal group stop */
+	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 }
 
 /*
/*


@@ -422,14 +422,16 @@ struct rb_event_info {
 /*
  * Used for which event context the event is in.
- *  NMI     = 0
- *  IRQ     = 1
- *  SOFTIRQ = 2
- *  NORMAL  = 3
+ *  TRANSITION = 0
+ *  NMI     = 1
+ *  IRQ     = 2
+ *  SOFTIRQ = 3
+ *  NORMAL  = 4
  *
  * See trace_recursive_lock() comment below for more details.
  */
 enum {
+	RB_CTX_TRANSITION,
 	RB_CTX_NMI,
 	RB_CTX_IRQ,
 	RB_CTX_SOFTIRQ,
@@ -2660,10 +2662,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * a bit of overhead in something as critical as function tracing,
  * we use a bitmask trick.
  *
- *  bit 0 =  NMI context
- *  bit 1 =  IRQ context
- *  bit 2 =  SoftIRQ context
- *  bit 3 =  normal context.
+ *  bit 1 =  NMI context
+ *  bit 2 =  IRQ context
+ *  bit 3 =  SoftIRQ context
+ *  bit 4 =  normal context.
 *
 * This works because this is the order of contexts that can
 * preempt other contexts. A SoftIRQ never preempts an IRQ
@@ -2686,6 +2688,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The least significant bit can be cleared this way, and it
  * just so happens that it is the same bit corresponding to
  * the current context.
+ *
+ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
+ * is set when a recursion is detected at the current context, and if
+ * the TRANSITION bit is already set, it will fail the recursion.
+ * This is needed because there's a lag between the changing of
+ * interrupt context and updating the preempt count. In this case,
+ * a false positive will be found. To handle this, one extra recursion
+ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
+ * bit is already set, then it is considered a recursion and the function
+ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
+ *
+ * On the trace_recursive_unlock(), the TRANSITION bit will be the first
+ * to be cleared. Even if it wasn't the context that set it. That is,
+ * if an interrupt comes in while NORMAL bit is set and the ring buffer
+ * is called before preempt_count() is updated, since the check will
+ * be on the NORMAL bit, the TRANSITION bit will then be set. If an
+ * NMI then comes in, it will set the NMI bit, but when the NMI code
+ * does the trace_recursive_unlock() it will clear the TRANSITION bit
+ * and leave the NMI bit set. But this is fine, because the interrupt
+ * code that set the TRANSITION bit will then clear the NMI bit when it
+ * calls trace_recursive_unlock(). If another NMI comes in, it will
+ * set the TRANSITION bit and continue.
+ *
+ * Note: The TRANSITION bit only handles a single transition between context.
  */
 static __always_inline int
@@ -2701,8 +2727,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 	bit = pc & NMI_MASK ? RB_CTX_NMI :
 		pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
 
-	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
-		return 1;
+	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
+		/*
+		 * It is possible that this was called by transitioning
+		 * between interrupt context, and preempt_count() has not
+		 * been updated yet. In this case, use the TRANSITION bit.
+		 */
+		bit = RB_CTX_TRANSITION;
+		if (val & (1 << (bit + cpu_buffer->nest)))
+			return 1;
+	}
 
 	val |= (1 << (bit + cpu_buffer->nest));
 	cpu_buffer->current_context = val;
@@ -2717,8 +2751,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 		cpu_buffer->current_context - (1 << cpu_buffer->nest);
 }
 
-/* The recursive locking above uses 4 bits */
-#define NESTED_BITS 4
+/* The recursive locking above uses 5 bits */
+#define NESTED_BITS 5
 
 /**
  * ring_buffer_nest_start - Allow to trace while nested

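The TRANSITION bit is easiest to see in a runnable model: the per-context bits normally reject any second event from the same context, and the extra bit grants exactly one pardon for the window where preempt_count() has not yet caught up with the real context. A compile-and-run model of the bitmask logic above (model only; the kernel additionally shifts everything by cpu_buffer->nest):

#include <stdio.h>

enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int current_context;

static int recursive_lock(int bit)
{
	if (current_context & (1u << bit)) {
		/* same context seen twice: allow one transition */
		bit = CTX_TRANSITION;
		if (current_context & (1u << bit))
			return 1;	/* genuine recursion, reject */
	}
	current_context |= 1u << bit;
	return 0;
}

static void recursive_unlock(void)
{
	/* clear the least significant set bit (TRANSITION goes first) */
	current_context &= current_context - 1;
}

int main(void)
{
	/* NORMAL context, then an IRQ fires before preempt_count updates,
	 * so the nested path re-enters with the same NORMAL bit. */
	printf("normal:     %d\n", recursive_lock(CTX_NORMAL));	/* 0 */
	printf("false pos.: %d\n", recursive_lock(CTX_NORMAL));	/* 0 */
	printf("third try:  %d\n", recursive_lock(CTX_NORMAL));	/* 1 */
	recursive_unlock();	/* drops TRANSITION */
	recursive_unlock();	/* drops NORMAL */
	return 0;
}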

@@ -3012,7 +3012,7 @@ static char *get_trace_buf(void)
 	/* Interrupts must see nesting incremented before we use the buffer */
 	barrier();
 
-	return &buffer->buffer[buffer->nesting][0];
+	return &buffer->buffer[buffer->nesting - 1][0];
 }
 
 static void put_trace_buf(void)
static void put_trace_buf(void)

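The get_trace_buf() one-liner is an off-by-one: nesting is incremented before the buffer is chosen, so the live level is nesting - 1, and indexing with nesting writes one slot past the current level, out of bounds at maximum depth. A minimal model:

#include <stdio.h>

#define MAX_NEST 4
static char bufs[MAX_NEST][16];
static int nesting;

static char *get_trace_buf(void)
{
	if (nesting >= MAX_NEST)
		return NULL;
	nesting++;			/* count this level first... */
	return bufs[nesting - 1];	/* ...then index the live level */
}

static void put_trace_buf(void)
{
	nesting--;
}

int main(void)
{
	for (int i = 0; i < 5; i++)	/* fifth request is rejected */
		printf("level %d: %s\n", i,
		       get_trace_buf() ? "ok" : "rejected");
	while (nesting)
		put_trace_buf();
	return 0;
}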

@@ -592,6 +592,12 @@ enum {
 	 * function is called to clear it.
 	 */
 	TRACE_GRAPH_NOTRACE_BIT,
+
+	/*
+	 * When transitioning between context, the preempt_count() may
+	 * not be correct. Allow for a single recursion to cover this case.
+	 */
+	TRACE_TRANSITION_BIT,
 };
 
 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
@@ -646,14 +652,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max)
 		return 0;
 
 	bit = trace_get_context_bit() + start;
-	if (unlikely(val & (1 << bit)))
-		return -1;
+	if (unlikely(val & (1 << bit))) {
+		/*
+		 * It could be that preempt_count has not been updated during
+		 * a switch between contexts. Allow for a single recursion.
+		 */
+		bit = TRACE_TRANSITION_BIT;
+		if (trace_recursion_test(bit))
+			return -1;
+
+		trace_recursion_set(bit);
+		barrier();
+		return bit + 1;
+	}
+
+	/* Normal check passed, clear the transition to allow it again */
+	trace_recursion_clear(TRACE_TRANSITION_BIT);
 
 	val |= 1 << bit;
 	current->trace_recursion = val;
 	barrier();
 
-	return bit;
+	return bit + 1;
 }
 
 static __always_inline void trace_clear_recursion(int bit)
@@ -663,6 +682,7 @@ static __always_inline void trace_clear_recursion(int bit)
 	if (!bit)
 		return;
 
+	bit--;
 	bit = 1 << bit;
 	val &= ~bit;

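trace_test_and_set_recursion() now returns bit + 1 rather than bit, because 0 became a legitimate bit number once TRACE_TRANSITION_BIT joined the enum, while trace_clear_recursion() still uses 0 to mean "nothing was taken"; the clearer compensates with bit--. A small sketch of the handle convention (illustrative, not the tracing code):

#include <stdio.h>

static unsigned int recursion;

/* returns -1 = recursion, >0 = handle (bit + 1); 0 stays "nothing" */
static int test_and_set(int bit)
{
	if (recursion & (1u << bit))
		return -1;
	recursion |= 1u << bit;
	return bit + 1;
}

static void clear(int handle)
{
	if (!handle)		/* 0 still means "no bit was taken" */
		return;
	recursion &= ~(1u << (handle - 1));
}

int main(void)
{
	int h = test_and_set(0);	/* bit 0 yields handle 1, not 0 */
	printf("handle %d, retry %d\n", h, test_and_set(0)); /* 1, -1 */
	clear(h);
	printf("after clear: %d\n", test_and_set(0));	/* 1 again */
	return 0;
}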

@@ -492,8 +492,13 @@ trace_selftest_function_recursion(void)
 	unregister_ftrace_function(&test_rec_probe);
 
 	ret = -1;
-	if (trace_selftest_recursion_cnt != 1) {
-		pr_cont("*callback not called once (%d)* ",
+	/*
+	 * Recursion allows for transitions between context,
+	 * and may call the callback twice.
+	 */
+	if (trace_selftest_recursion_cnt != 1 &&
+	    trace_selftest_recursion_cnt != 2) {
+		pr_cont("*callback not called once (or twice) (%d)* ",
 			trace_selftest_recursion_cnt);
 		goto out;
 	}


@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
 
 	/* reduce OS noise */
 	local_irq_save(flags);
-	local_irq_disable();
 
 	nsec = ktime_get_ns();
 	for (i = 0; i < 100; i++) {
@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
 	nsec = ktime_get_ns() - nsec;
 
 	local_irq_restore(flags);
-	local_irq_enable();
 
 	pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
 
@@ -768,7 +766,6 @@ static int __init crc32_test(void)
 
 	/* reduce OS noise */
 	local_irq_save(flags);
-	local_irq_disable();
 
 	nsec = ktime_get_ns();
 	for (i = 0; i < 100; i++) {
@@ -783,7 +780,6 @@ static int __init crc32_test(void)
 	nsec = ktime_get_ns() - nsec;
 
 	local_irq_restore(flags);
-	local_irq_enable();
 
 	pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
 		CRC_LE_BITS, CRC_BE_BITS);


@@ -8,7 +8,7 @@
 
 #define FONTDATAMAX 9216
 
-static struct font_data fontdata_10x18 = {
+static const struct font_data fontdata_10x18 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, 0x00, /* 0000000000 */


@@ -3,7 +3,7 @@
 
 #define FONTDATAMAX 2560
 
-static struct font_data fontdata_6x10 = {
+static const struct font_data fontdata_6x10 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */


@@ -9,7 +9,7 @@
 
 #define FONTDATAMAX (11*256)
 
-static struct font_data fontdata_6x11 = {
+static const struct font_data fontdata_6x11 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */


@@ -8,7 +8,7 @@
 
 #define FONTDATAMAX 3584
 
-static struct font_data fontdata_7x14 = {
+static const struct font_data fontdata_7x14 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 0000000 */


@@ -10,7 +10,7 @@
 
 #define FONTDATAMAX 4096
 
-static struct font_data fontdata_8x16 = {
+static const struct font_data fontdata_8x16 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */


@@ -9,7 +9,7 @@
 
 #define FONTDATAMAX 2048
 
-static struct font_data fontdata_8x8 = {
+static const struct font_data fontdata_8x8 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */

(Some files were not shown because too many files changed in this diff.)