This is the 5.4.28 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl57B4gACgkQONu9yGCS
 aT6KWQ//ToF4D+fDl1Muf4xuT83HnXe1yA1XFlyC54ZmEaxGFnFMSAgAvitBR7HC
 GEczXlvYbbVJl646ynTmX/LFz2d+V0i2zGv2QKN3xfd8GtDrAq/s+Ffhneaskk1k
 gKkIUzOyBq0nEAq5vXbCT4LbQDYGLw8BxxvurLim/YywSy5sfnw+hKkYE7cVVSOa
 rTIdt5qnuL4pxD8VeCAakoU6SajoxfFqS8pX79LC1UPY+++OaVcJjyJvM4APw0Kr
 e2E1BaeZxxCyY57pQY2oqhjG3cCfIcmfln19JRzMCVNo9+J3MEjZI5EUqP/Zcjwz
 1V5FHqDmqMGfA9cn+CexDk79bTKW5+YOyYMEG2RJjV5alWJtvv3Wj6dRPVPpBhnJ
 O627IuIVGMsHuiEbjziKczaTtYYU5GTpJF+7COH0Jnud0q1w3/RjaJXspnb2Yozh
 L9BFqc4aonD+AyW2NXTxuuvc3hnD2YSVgLectm7LF/TbM2YFJVu4tounelFsGG6I
 CPH2VF0Tn+yNR2iWf8igvopvYPCjv+QFGXU6kFGxQxLQFTnXHqoO2sPF4awXx7Hv
 XF8LrJzPwissX5BbPyhUSIl0FEcmQi6UzSN1/6fpxVq+092OGVacMWpZvwqjUOV/
 3k/OrmcYsu7i2UUbms47YHAK0PRkL2ogKgxgcSO4aNZ7MfkXkm8=
 =kp1m
 -----END PGP SIGNATURE-----

Merge 5.4.28 into android-5.4

Changes in 5.4.28
	locks: fix a potential use-after-free problem when wakeup a waiter
	locks: reinstate locks_delete_block optimization
	spi: spi-omap2-mcspi: Support probe deferral for DMA channels
	drm/mediatek: Find the cursor plane instead of hard coding it
	phy: ti: gmii-sel: fix set of copy-paste errors
	phy: ti: gmii-sel: do not fail in case of gmii
	ARM: dts: dra7-l4: mark timer13-16 as pwm capable
	spi: qup: call spi_qup_pm_resume_runtime before suspending
	powerpc: Include .BTF section
	cifs: fix potential mismatch of UNC paths
	cifs: add missing mount option to /proc/mounts
	ARM: dts: dra7: Add "dma-ranges" property to PCIe RC DT nodes
	spi: pxa2xx: Add CS control clock quirk
	spi/zynqmp: remove entry that causes a cs glitch
	drm/exynos: dsi: propagate error value and silence meaningless warning
	drm/exynos: dsi: fix workaround for the legacy clock name
	drm/exynos: hdmi: don't leak enable HDMI_EN regulator if probe fails
	drivers/perf: fsl_imx8_ddr: Correct the CLEAR bit definition
	drivers/perf: arm_pmu_acpi: Fix incorrect checking of gicc pointer
	altera-stapl: altera_get_note: prevent write beyond end of 'key'
	dm bio record: save/restore bi_end_io and bi_integrity
	dm integrity: use dm_bio_record and dm_bio_restore
	riscv: avoid the PIC offset of static percpu data in module beyond 2G limits
	ASoC: stm32: sai: manage rebind issue
	spi: spi_register_controller(): free bus id on error paths
	riscv: Force flat memory model with no-mmu
	riscv: Fix range looking for kernel image memblock
	drm/amdgpu: clean wptr on wb when gpu recovery
	drm/amd/display: Clear link settings on MST disable connector
	drm/amd/display: fix dcc swath size calculations on dcn1
	xenbus: req->body should be updated before req->state
	xenbus: req->err should be updated before req->state
	block, bfq: fix overwrite of bfq_group pointer in bfq_find_set_group()
	parse-maintainers: Mark as executable
	binderfs: use refcount for binder control devices too
	Revert "drm/fbdev: Fallback to non tiled mode if all tiles not present"
	USB: Disable LPM on WD19's Realtek Hub
	usb: quirks: add NO_LPM quirk for RTL8153 based ethernet adapters
	USB: serial: option: add ME910G1 ECM composition 0x110b
	usb: host: xhci-plat: add a shutdown
	USB: serial: pl2303: add device-id for HP LD381
	usb: xhci: apply XHCI_SUSPEND_DELAY to AMD XHCI controller 1022:145c
	usb: typec: ucsi: displayport: Fix NULL pointer dereference
	usb: typec: ucsi: displayport: Fix a potential race during registration
	USB: cdc-acm: fix close_delay and closing_wait units in TIOCSSERIAL
	USB: cdc-acm: fix rounding error in TIOCSSERIAL
	ALSA: line6: Fix endless MIDI read loop
	ALSA: hda/realtek - Enable headset mic of Acer X2660G with ALC662
	ALSA: hda/realtek - Enable the headset of Acer N50-600 with ALC662
	ALSA: seq: virmidi: Fix running status after receiving sysex
	ALSA: seq: oss: Fix running status after receiving sysex
	ALSA: pcm: oss: Avoid plugin buffer overflow
	ALSA: pcm: oss: Remove WARNING from snd_pcm_plug_alloc() checks
	tty: fix compat TIOCGSERIAL leaking uninitialized memory
	tty: fix compat TIOCGSERIAL checking wrong function ptr
	iio: chemical: sps30: fix missing triggered buffer dependency
	iio: st_sensors: remap SMO8840 to LIS2DH12
	iio: trigger: stm32-timer: disable master mode when stopping
	iio: accel: adxl372: Set iio_chan BE
	iio: magnetometer: ak8974: Fix negative raw values in sysfs
	iio: adc: stm32-dfsdm: fix sleep in atomic context
	iio: adc: at91-sama5d2_adc: fix differential channels in triggered mode
	iio: light: vcnl4000: update sampling periods for vcnl4200
	iio: light: vcnl4000: update sampling periods for vcnl4040
	mmc: rtsx_pci: Fix support for speed-modes that relies on tuning
	mmc: sdhci-of-at91: fix cd-gpios for SAMA5D2
	mmc: sdhci-cadence: set SDHCI_QUIRK2_PRESET_VALUE_BROKEN for UniPhier
	CIFS: fiemap: do not return EINVAL if get nothing
	kbuild: Disable -Wpointer-to-enum-cast
	staging: rtl8188eu: Add device id for MERCUSYS MW150US v2
	staging: greybus: loopback_test: fix poll-mask build breakage
	staging/speakup: fix get_word non-space look-ahead
	intel_th: msu: Fix the unexpected state warning
	intel_th: Fix user-visible error codes
	intel_th: pci: Add Elkhart Lake CPU support
	modpost: move the namespace field in Module.symvers last
	rtc: max8907: add missing select REGMAP_IRQ
	arm64: compat: Fix syscall number of compat_clock_getres
	xhci: Do not open code __print_symbolic() in xhci trace events
	btrfs: fix log context list corruption after rename whiteout error
	drm/amd/amdgpu: Fix GPR read from debugfs (v2)
	drm/lease: fix WARNING in idr_destroy
	stm class: sys-t: Fix the use of time_after()
	memcg: fix NULL pointer dereference in __mem_cgroup_usage_unregister_event
	mm, memcg: fix corruption on 64-bit divisor in memory.high throttling
	mm, memcg: throttle allocators based on ancestral memory.high
	mm/hotplug: fix hot remove failure in SPARSEMEM|!VMEMMAP case
	mm: do not allow MADV_PAGEOUT for CoW pages
	epoll: fix possible lost wakeup on epoll_ctl() path
	mm: slub: be more careful about the double cmpxchg of freelist
	mm, slub: prevent kmalloc_node crashes and memory leaks
	page-flags: fix a crash at SetPageError(THP_SWAP)
	x86/mm: split vmalloc_sync_all()
	futex: Fix inode life-time issue
	futex: Unbreak futex hashing
	ALSA: hda/realtek: Fix pop noise on ALC225
	arm64: smp: fix smp_send_stop() behaviour
	arm64: smp: fix crash_smp_send_stop() behaviour
	nvmet-tcp: set MSG_MORE only if we actually have more to send
	drm/bridge: dw-hdmi: fix AVI frame colorimetry
	staging: greybus: loopback_test: fix potential path truncation
	staging: greybus: loopback_test: fix potential path truncations
	Linux 5.4.28

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5d9d15d6236d8ab7374205c6ceda7efa7a9abb70

commit 2341be6d9d
Greg Kroah-Hartman, 2020-03-25 16:12:11 +01:00
101 changed files with 772 additions and 513 deletions


@ -470,9 +470,9 @@ build.
The syntax of the Module.symvers file is::
<CRC> <Symbol> <Namespace> <Module> <Export Type>
<CRC> <Symbol> <Module> <Export Type> <Namespace>
0xe1cc2a05 usb_stor_suspend USB_STORAGE drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE
The fields are separated by tabs and values may be empty (e.g.
if no namespace is defined for an exported symbol).


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 27
SUBLEVEL = 28
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -3413,6 +3413,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER13_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
};
@ -3441,6 +3442,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
};
@ -3469,6 +3471,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
};
@ -3497,6 +3500,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
};


@ -184,6 +184,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@ -238,6 +239,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;


@ -25,8 +25,8 @@
#define __NR_compat_gettimeofday 78
#define __NR_compat_sigreturn 119
#define __NR_compat_rt_sigreturn 173
#define __NR_compat_clock_getres 247
#define __NR_compat_clock_gettime 263
#define __NR_compat_clock_getres 264
#define __NR_compat_clock_gettime64 403
#define __NR_compat_clock_getres_time64 406


@ -959,11 +959,22 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
/*
* The number of CPUs online, not counting this CPU (which may not be
* fully online and so not counted in num_online_cpus()).
*/
static inline unsigned int num_other_online_cpus(void)
{
unsigned int this_cpu_online = cpu_online(smp_processor_id());
return num_online_cpus() - this_cpu_online;
}
void smp_send_stop(void)
{
unsigned long timeout;
if (num_online_cpus() > 1) {
if (num_other_online_cpus()) {
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
@ -976,10 +987,10 @@ void smp_send_stop(void)
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
while (num_online_cpus() > 1 && timeout--)
while (num_other_online_cpus() && timeout--)
udelay(1);
if (num_online_cpus() > 1)
if (num_other_online_cpus())
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
@ -1002,7 +1013,11 @@ void crash_smp_send_stop(void)
cpus_stopped = 1;
if (num_online_cpus() == 1) {
/*
* If this cpu is the only one alive at this point in time, online or
* not, there are no stop messages to be sent around, so just back out.
*/
if (num_other_online_cpus() == 0) {
sdei_mask_local_cpu();
return;
}
@ -1010,7 +1025,7 @@ void crash_smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_CRASH_STOP);


@ -326,6 +326,12 @@ SECTIONS
*(.branch_lt)
}
#ifdef CONFIG_DEBUG_INFO_BTF
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
*(.BTF)
}
#endif
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
KEEP(*(.opd))


@ -101,6 +101,7 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on MMU
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_SELECT_MEMORY_MODEL


@ -8,6 +8,10 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
{
@ -386,3 +390,15 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return 0;
}
#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
#define VMALLOC_MODULE_START \
max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
VMALLOC_END, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif


@ -98,7 +98,7 @@ void __init setup_bootmem(void)
for_each_memblock(memory, reg) {
phys_addr_t end = reg->base + reg->size;
if (reg->base <= vmlinux_end && vmlinux_end <= end) {
if (reg->base <= vmlinux_start && vmlinux_end <= end) {
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
/*


@ -189,7 +189,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
return pmd_k;
}
void vmalloc_sync_all(void)
static void vmalloc_sync(void)
{
unsigned long address;
@ -216,6 +216,16 @@ void vmalloc_sync_all(void)
}
}
void vmalloc_sync_mappings(void)
{
vmalloc_sync();
}
void vmalloc_sync_unmappings(void)
{
vmalloc_sync();
}
/*
* 32-bit:
*
@ -318,11 +328,23 @@ out:
#else /* CONFIG_X86_64: */
void vmalloc_sync_all(void)
void vmalloc_sync_mappings(void)
{
/*
* 64-bit mappings might allocate new p4d/pud pages
* that need to be propagated to all tasks' PGDs.
*/
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}
void vmalloc_sync_unmappings(void)
{
/*
* Unmappings never allocate or free p4d/pud pages.
* No work is required here.
*/
}
/*
* 64-bit:
*


@ -593,12 +593,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
*/
entity = &bfqg->entity;
for_each_entity(entity) {
bfqg = container_of(entity, struct bfq_group, entity);
if (bfqg != bfqd->root_group) {
parent = bfqg_parent(bfqg);
struct bfq_group *curr_bfqg = container_of(entity,
struct bfq_group, entity);
if (curr_bfqg != bfqd->root_group) {
parent = bfqg_parent(curr_bfqg);
if (!parent)
parent = bfqd->root_group;
bfq_group_set_parent(bfqg, parent);
bfq_group_set_parent(curr_bfqg, parent);
}
}


@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
* New allocation must be visible in all pgd before it can be found by
* an NMI allocating from the pool.
*/
vmalloc_sync_all();
vmalloc_sync_mappings();
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
if (rc)


@ -448,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_uid = info->root_uid;
inode->i_gid = info->root_gid;
refcount_set(&device->ref, 1);
device->binderfs_inode = inode;
device->miscdev.minor = minor;


@ -694,11 +694,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
ssize_t result = 0;
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
if (size & 3 || *pos & 3)
if (size > 4096 || size & 3 || *pos & 3)
return -EINVAL;
/* decode offset */
offset = *pos & GENMASK_ULL(11, 0);
offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
se = (*pos & GENMASK_ULL(19, 12)) >> 12;
sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@ -729,7 +729,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
value = data[offset++];
value = data[result >> 2];
r = put_user(value, (uint32_t *)buf);
if (r) {
result = r;


@ -3555,6 +3555,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);


@ -3756,6 +3756,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);


@ -379,6 +379,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->dc_link->cur_link_settings.lane_count = 0;
}
drm_connector_unregister(connector);


@ -840,8 +840,8 @@ static void hubbub1_det_request_size(
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
swath_bytes_horz_wc = height * blk256_height * bpe;
swath_bytes_vert_wc = width * blk256_width * bpe;
swath_bytes_horz_wc = width * blk256_height * bpe;
swath_bytes_vert_wc = height * blk256_width * bpe;
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
false : /* full 256B request */


@ -1576,28 +1576,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
frame.colorspace = HDMI_COLORSPACE_RGB;
/* Set up colorimetry */
switch (hdmi->hdmi_data.enc_out_encoding) {
case V4L2_YCBCR_ENC_601:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
switch (hdmi->hdmi_data.enc_out_encoding) {
case V4L2_YCBCR_ENC_601:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
case V4L2_YCBCR_ENC_709:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
break;
default: /* Carries no data */
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
}
} else {
frame.colorimetry = HDMI_COLORIMETRY_NONE;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
case V4L2_YCBCR_ENC_709:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
break;
default: /* Carries no data */
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
}
frame.scan_mode = HDMI_SCAN_MODE_NONE;


@ -114,33 +114,6 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
return NULL;
}
static struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
mode->vdisplay == connector->tile_v_size)
return mode;
}
return NULL;
}
static struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
mode->vdisplay == connector->tile_v_size)
continue;
return mode;
}
return NULL;
}
static struct drm_display_mode *
drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
@ -375,15 +348,8 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
int i;
for (i = 0; i < connector_count; i++) {
if (connectors[i]->has_tile &&
connectors[i]->status == connector_status_connected)
num_tiled_conns++;
}
retry:
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
@ -433,28 +399,6 @@ retry:
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
* After all tiles are present, try to find the tiled mode
* for all and if tiled mode not present due to fbcon size
* limitations, use first non tiled mode only for
* tile 0,0 and set to no mode for all other tiles.
*/
if (connector->has_tile) {
if (num_tiled_conns <
connector->num_h_tile * connector->num_v_tile ||
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
connector->base.id);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
} else {
modes[i] = drm_connector_get_tiled_mode(connector);
}
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
conn_configured |= BIT_ULL(i);
@ -572,7 +516,6 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
int num_tiled_conns = 0;
struct drm_modeset_acquire_ctx ctx;
if (!drm_drv_uses_atomic_modeset(dev))
@ -590,11 +533,6 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
memcpy(save_enabled, enabled, count);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
for (i = 0; i < count; i++) {
if (connectors[i]->has_tile &&
connectors[i]->status == connector_status_connected)
num_tiled_conns++;
}
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
@ -694,16 +632,6 @@ retry:
connector->name);
modes[i] = &connector->state->crtc->mode;
}
/*
* In case of tiled modes, if all tiles are not present
* then fallback to a non tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
connector->base.id);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
}
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",


@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
}
DRM_DEBUG_LEASE("Creating lease\n");
/* lessee will take the ownership of leases */
lessee = drm_lease_create(lessor, &leases);
if (IS_ERR(lessee)) {
ret = PTR_ERR(lessee);
idr_destroy(&leases);
goto out_leases;
}
@ -580,7 +582,6 @@ out_lessee:
out_leases:
put_unused_fd(fd);
idr_destroy(&leases);
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;


@ -1750,8 +1750,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
dev_info(dev, "failed to get regulators: %d\n", ret);
return -EPROBE_DEFER;
if (ret != -EPROBE_DEFER)
dev_info(dev, "failed to get regulators: %d\n", ret);
return ret;
}
dsi->clks = devm_kcalloc(dev,
@ -1764,9 +1765,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(dsi->clks[i])) {
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
i--;
continue;
dsi->clks[i] = devm_clk_get(dev,
OLD_SCLK_MIPI_CLK_NAME);
if (!IS_ERR(dsi->clks[i]))
continue;
}
dev_info(dev, "failed to get the clock: %s\n",


@ -1802,18 +1802,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV)
if (IS_ERR(hdata->reg_hdmi_en))
return PTR_ERR(hdata->reg_hdmi_en);
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
DRM_DEV_ERROR(dev,
"failed to enable hdmi-en regulator\n");
return ret;
}
}
return hdmi_bridge_init(hdata);
}
@ -2020,6 +2012,15 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
if (!IS_ERR(hdata->reg_hdmi_en)) {
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
DRM_DEV_ERROR(dev,
"failed to enable hdmi-en regulator\n");
goto err_hdmiphy;
}
}
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@ -2044,7 +2045,8 @@ err_unregister_audio:
err_rpm_disable:
pm_runtime_disable(dev);
if (!IS_ERR(hdata->reg_hdmi_en))
regulator_disable(hdata->reg_hdmi_en);
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);


@ -496,10 +496,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
static int mtk_drm_crtc_init(struct drm_device *drm,
struct mtk_drm_crtc *mtk_crtc,
struct drm_plane *primary,
struct drm_plane *cursor, unsigned int pipe)
unsigned int pipe)
{
int ret;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
for (i = 0; i < mtk_crtc->layer_nr; i++) {
if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
primary = &mtk_crtc->planes[i];
else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
cursor = &mtk_crtc->planes[i];
}
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
&mtk_crtc_funcs, NULL);
@ -608,9 +616,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
return ret;
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
NULL, pipe);
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
if (ret < 0)
return ret;


@ -718,9 +718,6 @@ static int msc_win_set_lockout(struct msc_window *win,
if (old != expect) {
ret = -EINVAL;
dev_warn_ratelimited(msc_dev(win->msc),
"expected lockout state %d, got %d\n",
expect, old);
goto unlock;
}
@ -741,6 +738,10 @@ unlock:
/* from intel_th_msc_window_unlock(), don't warn if not locked */
if (expect == WIN_LOCKED && old == new)
return 0;
dev_warn_ratelimited(msc_dev(win->msc),
"expected lockout state %d, got %d\n",
expect, old);
}
return ret;
@ -760,7 +761,7 @@ static int msc_configure(struct msc *msc)
lockdep_assert_held(&msc->buf_mutex);
if (msc->mode > MSC_MODE_MULTI)
return -ENOTSUPP;
return -EINVAL;
if (msc->mode == MSC_MODE_MULTI) {
if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
@ -1294,7 +1295,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
} else if (msc->mode == MSC_MODE_MULTI) {
ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
} else {
ret = -ENOTSUPP;
ret = -EINVAL;
}
if (!ret) {
@ -1530,7 +1531,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
if (ret >= 0)
*ppos = iter->offset;
} else {
ret = -ENOTSUPP;
ret = -EINVAL;
}
put_count:


@ -234,6 +234,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Elkhart Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Elkhart Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),


@ -238,7 +238,7 @@ static struct configfs_attribute *sys_t_policy_attrs[] = {
static inline bool sys_t_need_ts(struct sys_t_output *op)
{
if (op->node.ts_interval &&
time_after(op->ts_jiffies + op->node.ts_interval, jiffies)) {
time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
op->ts_jiffies = jiffies;
return true;
@ -250,8 +250,8 @@ static inline bool sys_t_need_ts(struct sys_t_output *op)
static bool sys_t_need_clock_sync(struct sys_t_output *op)
{
if (op->node.clocksync_interval &&
time_after(op->clocksync_jiffies + op->node.clocksync_interval,
jiffies)) {
time_after(jiffies,
op->clocksync_jiffies + op->node.clocksync_interval)) {
op->clocksync_jiffies = jiffies;
return true;


@ -237,6 +237,7 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = {
.realbits = 12, \
.storagebits = 16, \
.shift = 4, \
.endianness = IIO_BE, \
}, \
}


@ -114,7 +114,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
{"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
{"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{ },
};


@ -723,6 +723,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
u32 cor;
if (!chan)
continue;
@ -731,6 +732,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
chan->type == IIO_PRESSURE)
continue;
if (state) {
cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
if (chan->differential)
cor |= (BIT(chan->channel) |
BIT(chan->channel2)) <<
AT91_SAMA5D2_COR_DIFF_OFFSET;
else
cor &= ~(BIT(chan->channel) <<
AT91_SAMA5D2_COR_DIFF_OFFSET);
at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
}
if (state) {
at91_adc_writel(st, AT91_SAMA5D2_CHER,
BIT(chan->channel));


@ -842,31 +842,6 @@ static inline void stm32_dfsdm_process_data(struct stm32_dfsdm_adc *adc,
}
}
static irqreturn_t stm32_dfsdm_adc_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
int available = stm32_dfsdm_adc_dma_residue(adc);
while (available >= indio_dev->scan_bytes) {
s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi];
stm32_dfsdm_process_data(adc, buffer);
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
pf->timestamp);
available -= indio_dev->scan_bytes;
adc->bufi += indio_dev->scan_bytes;
if (adc->bufi >= adc->buf_sz)
adc->bufi = 0;
}
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
static void stm32_dfsdm_dma_buffer_done(void *data)
{
struct iio_dev *indio_dev = data;
@ -874,11 +849,6 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
int available = stm32_dfsdm_adc_dma_residue(adc);
size_t old_pos;
if (indio_dev->currentmode & INDIO_BUFFER_TRIGGERED) {
iio_trigger_poll_chained(indio_dev->trig);
return;
}
/*
* FIXME: In Kernel interface does not support cyclic DMA buffer,and
* offers only an interface to push data samples per samples.
@ -906,7 +876,15 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
adc->bufi = 0;
old_pos = 0;
}
/* regular iio buffer without trigger */
/*
* In DMA mode the trigger services of IIO are not used
* (e.g. no call to iio_trigger_poll).
* Calling irq handler associated to the hardware trigger is not
* relevant as the conversions have already been done. Data
* transfers are performed directly in DMA callback instead.
* This implementation avoids to call trigger irq handler that
* may sleep, in an atomic context (DMA irq handler context).
*/
if (adc->dev_data->type == DFSDM_IIO)
iio_push_to_buffers(indio_dev, buffer);
}
@ -1517,8 +1495,7 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
}
ret = iio_triggered_buffer_setup(indio_dev,
&iio_pollfunc_store_time,
&stm32_dfsdm_adc_trigger_handler,
&iio_pollfunc_store_time, NULL,
&stm32_dfsdm_buffer_setup_ops);
if (ret) {
stm32_dfsdm_dma_release(indio_dev);


@ -91,6 +91,8 @@ config SPS30
tristate "SPS30 particulate matter sensor"
depends on I2C
select CRC8
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Sensirion SPS30 particulate
matter sensor.


@ -167,16 +167,17 @@ static int vcnl4200_init(struct vcnl4000_data *data)
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
switch (id) {
case VCNL4200_PROD_ID:
/* Integration time is 50ms, but the experiments */
/* show 54ms in total. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
/* Default wait time is 50ms, add 20% tolerance. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
/* Default wait time is 4.8ms, add 20% tolerance. */
data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
data->al_scale = 24000;
break;
case VCNL4040_PROD_ID:
/* Integration time is 80ms, add 10ms. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
/* Default wait time is 80ms, add 20% tolerance. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 96000 * 1000);
/* Default wait time is 5ms, add 20% tolerance. */
data->vcnl4200_ps.sampling_rate = ktime_set(0, 6000 * 1000);
data->al_scale = 120000;
break;
}


@ -564,7 +564,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
* We read all axes and discard all but one, for optimized
* reading, use the triggered buffer.
*/
*val = le16_to_cpu(hw_values[chan->address]);
*val = (s16)le16_to_cpu(hw_values[chan->address]);
ret = IIO_VAL_INT;
}


@ -161,7 +161,8 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
return 0;
}
static void stm32_timer_stop(struct stm32_timer_trigger *priv)
static void stm32_timer_stop(struct stm32_timer_trigger *priv,
struct iio_trigger *trig)
{
u32 ccer, cr1;
@ -179,6 +180,12 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
regmap_write(priv->regmap, TIM_PSC, 0);
regmap_write(priv->regmap, TIM_ARR, 0);
/* Force disable master mode */
if (stm32_timer_is_trgo2_name(trig->name))
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
else
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
/* Make sure that registers are updated */
regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
}
@ -197,7 +204,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev,
return ret;
if (freq == 0) {
stm32_timer_stop(priv);
stm32_timer_stop(priv, trig);
} else {
ret = stm32_timer_start(priv, trig, freq);
if (ret)


@ -20,8 +20,13 @@
struct dm_bio_details {
struct gendisk *bi_disk;
u8 bi_partno;
int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
bio_end_io_t *bi_end_io;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity;
#endif
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
@ -30,6 +35,11 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
bd->bi_partno = bio->bi_partno;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
bd->bi_end_io = bio->bi_end_io;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
bd->bi_integrity = bio_integrity(bio);
#endif
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
@ -38,6 +48,11 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
bio->bi_partno = bd->bi_partno;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
bio->bi_end_io = bd->bi_end_io;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
bio->bi_integrity = bd->bi_integrity;
#endif
}
#endif


@ -6,6 +6,8 @@
* This file is released under the GPL.
*/
#include "dm-bio-record.h"
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
@ -292,11 +294,7 @@ struct dm_integrity_io {
struct completion *completion;
struct gendisk *orig_bi_disk;
u8 orig_bi_partno;
bio_end_io_t *orig_bi_end_io;
struct bio_integrity_payload *orig_bi_integrity;
struct bvec_iter orig_bi_iter;
struct dm_bio_details bio_details;
};
struct journal_completion {
@ -1447,14 +1445,9 @@ static void integrity_end_io(struct bio *bio)
{
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio->bi_iter = dio->orig_bi_iter;
bio->bi_disk = dio->orig_bi_disk;
bio->bi_partno = dio->orig_bi_partno;
if (dio->orig_bi_integrity) {
bio->bi_integrity = dio->orig_bi_integrity;
dm_bio_restore(&dio->bio_details, bio);
if (bio->bi_integrity)
bio->bi_opf |= REQ_INTEGRITY;
}
bio->bi_end_io = dio->orig_bi_end_io;
if (dio->completion)
complete(dio->completion);
@ -1539,7 +1532,7 @@ static void integrity_metadata(struct work_struct *w)
}
}
__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos;
char *mem, *checksums_ptr;
@ -1583,7 +1576,7 @@ again:
if (likely(checksums != checksums_onstack))
kfree(checksums);
} else {
struct bio_integrity_payload *bip = dio->orig_bi_integrity;
struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
if (bip) {
struct bio_vec biv;
@ -2002,20 +1995,13 @@ offload_to_thread:
} else
dio->completion = NULL;
dio->orig_bi_iter = bio->bi_iter;
dio->orig_bi_disk = bio->bi_disk;
dio->orig_bi_partno = bio->bi_partno;
dm_bio_record(&dio->bio_details, bio);
bio_set_dev(bio, ic->dev->bdev);
dio->orig_bi_integrity = bio_integrity(bio);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
dio->orig_bi_end_io = bio->bi_end_io;
bio->bi_end_io = integrity_end_io;
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
generic_make_request(bio);
if (need_sync_io) {


@ -2112,8 +2112,8 @@ exit_done:
return status;
}
static int altera_get_note(u8 *p, s32 program_size,
s32 *offset, char *key, char *value, int length)
static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
char *key, char *value, int keylen, int vallen)
/*
* Gets key and value of NOTE fields in the JBC file.
* Can be called in two modes: if offset pointer is NULL,
@ -2170,7 +2170,7 @@ static int altera_get_note(u8 *p, s32 program_size,
&p[note_table + (8 * i) + 4])];
if (value != NULL)
strlcpy(value, value_ptr, length);
strlcpy(value, value_ptr, vallen);
}
}
@ -2189,13 +2189,13 @@ static int altera_get_note(u8 *p, s32 program_size,
strlcpy(key, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])],
length);
keylen);
if (value != NULL)
strlcpy(value, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])],
length);
vallen);
*offset = i + 1;
}
@ -2449,7 +2449,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
__func__, (format_version == 2) ? "Jam STAPL" :
"pre-standardized Jam 1.1");
while (altera_get_note((u8 *)fw->data, fw->size,
&offset, key, value, 256) == 0)
&offset, key, value, 32, 256) == 0)
printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
__func__, key, value);
}


@ -394,7 +394,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
void rts522a_init_params(struct rtsx_pcr *pcr)
{
rts5227_init_params(pcr);
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
pcr->option.ocp_en = 1;


@ -618,6 +618,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
@ -733,6 +734,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;


@ -663,7 +663,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
pcr->ic_version = rts5260_get_ic_version(pcr);


@ -606,19 +606,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
u8 sample_point, bool rx)
{
struct rtsx_pcr *pcr = host->pcr;
u16 SD_VP_CTL = 0;
dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
__func__, rx ? "RX" : "TX", sample_point);
rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
if (rx)
if (rx) {
SD_VP_CTL = SD_VPRX_CTL;
rtsx_pci_write_register(pcr, SD_VPRX_CTL,
PHASE_SELECT_MASK, sample_point);
else
} else {
SD_VP_CTL = SD_VPTX_CTL;
rtsx_pci_write_register(pcr, SD_VPTX_CTL,
PHASE_SELECT_MASK, sample_point);
rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
}
rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0);
rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET,
PHASE_NOT_RESET);
rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);


@ -11,6 +11,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "sdhci-pltfm.h"
@ -235,6 +236,11 @@ static const struct sdhci_ops sdhci_cdns_ops = {
.set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
.ops = &sdhci_cdns_ops,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
.ops = &sdhci_cdns_ops,
};
@ -334,6 +340,7 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
static int sdhci_cdns_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
const struct sdhci_pltfm_data *data;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_cdns_priv *priv;
struct clk *clk;
@ -350,8 +357,12 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
if (ret)
return ret;
data = of_device_get_match_data(dev);
if (!data)
data = &sdhci_cdns_pltfm_data;
nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data,
host = sdhci_pltfm_init(pdev, data,
struct_size(priv, phy_params, nr_phy_params));
if (IS_ERR(host)) {
ret = PTR_ERR(host);
@ -431,7 +442,10 @@ static const struct dev_pm_ops sdhci_cdns_pm_ops = {
};
static const struct of_device_id sdhci_cdns_match[] = {
{ .compatible = "socionext,uniphier-sd4hc" },
{
.compatible = "socionext,uniphier-sd4hc",
.data = &sdhci_cdns_uniphier_pltfm_data,
},
{ .compatible = "cdns,sd4hc" },
{ /* sentinel */ }
};


@ -118,7 +118,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
|| mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
}
@ -397,8 +398,11 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* detection procedure using the SDMCC_CD signal is bypassed.
* This bit is reset when a software reset for all command is performed
* so we need to implement our own reset function to set back this bit.
*
* WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
*/
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
|| mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
pm_runtime_put_autosuspend(&pdev->dev);


@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
return 1;
}
static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
struct nvmet_tcp_queue *queue = cmd->queue;
int ret;
@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
while (cmd->cur_sg) {
struct page *page = sg_page(cmd->cur_sg);
u32 left = cmd->cur_sg->length - cmd->offset;
int flags = MSG_DONTWAIT;
if ((!last_in_batch && cmd->queue->send_list_len) ||
cmd->wbytes_done + left < cmd->req.transfer_len ||
queue->data_digest || !queue->nvme_sq.sqhd_disabled)
flags |= MSG_MORE;
ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
left, MSG_DONTWAIT | MSG_MORE);
left, flags);
if (ret <= 0)
return ret;
@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
}
if (cmd->state == NVMET_TCP_SEND_DATA) {
ret = nvmet_try_send_data(cmd);
ret = nvmet_try_send_data(cmd, last_in_batch);
if (ret <= 0)
goto done_send;
}


@ -24,8 +24,6 @@ static int arm_pmu_acpi_register_irq(int cpu)
int gsi, trigger;
gicc = acpi_cpu_get_madt_gicc(cpu);
if (WARN_ON(!gicc))
return -EINVAL;
gsi = gicc->performance_interrupt;
@ -64,11 +62,10 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
int gsi;
gicc = acpi_cpu_get_madt_gicc(cpu);
if (!gicc)
return;
gsi = gicc->performance_interrupt;
acpi_unregister_gsi(gsi);
if (gsi)
acpi_unregister_gsi(gsi);
}
#if IS_ENABLED(CONFIG_ARM_SPE_PMU)


@ -327,9 +327,10 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
if (enable) {
/*
* must disable first, then enable again
* otherwise, cycle counter will not work
* if previous state is enabled.
* cycle counter is special which should firstly write 0 then
* write 1 into CLEAR bit to clear it. Other counters only
* need write 0 into CLEAR bit and it turns out to be 1 by
* hardware. Below enable flow is harmless for all counters.
*/
writel(0, pmu->base + reg);
val = CNTL_EN | CNTL_CLEAR;
@ -337,7 +338,8 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
writel(val, pmu->base + reg);
} else {
/* Disable counter */
writel(0, pmu->base + reg);
val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
writel(val, pmu->base + reg);
}
}


@ -80,20 +80,20 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
case PHY_INTERFACE_MODE_GMII:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII;
break;
default:
dev_warn(dev,
"port%u: unsupported mode: \"%s\". Defaulting to MII.\n",
if_phy->id, phy_modes(rgmii_id));
dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
if_phy->id, phy_modes(submode));
return -EINVAL;
}
if_phy->phy_if_mode = submode;
dev_dbg(dev, "%s id:%u mode:%u rgmii_id:%d rmii_clk_ext:%d\n",
__func__, if_phy->id, mode, rgmii_id,
__func__, if_phy->id, submode, rgmii_id,
if_phy->rmii_clock_external);
regfield = if_phy->fields[PHY_GMII_SEL_PORT_MODE];


@ -327,6 +327,7 @@ config RTC_DRV_MAX6900
config RTC_DRV_MAX8907
tristate "Maxim MAX8907"
depends on MFD_MAX8907 || COMPILE_TEST
select REGMAP_IRQ
help
If you say yes here you will get support for the
RTC of Maxim MAX8907 PMIC.


@ -985,20 +985,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
* Note that we currently allow DMA only if we get a channel
* for both rx and tx. Otherwise we'll do PIO for both rx and tx.
*/
static int omap2_mcspi_request_dma(struct spi_device *spi)
static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
struct omap2_mcspi_dma *mcspi_dma)
{
struct spi_master *master = spi->master;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
int ret = 0;
mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi->chip_select;
init_completion(&mcspi_dma->dma_rx_completion);
init_completion(&mcspi_dma->dma_tx_completion);
mcspi_dma->dma_rx = dma_request_chan(&master->dev,
mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_rx_ch_name);
if (IS_ERR(mcspi_dma->dma_rx)) {
ret = PTR_ERR(mcspi_dma->dma_rx);
@ -1006,7 +998,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
goto no_dma;
}
mcspi_dma->dma_tx = dma_request_chan(&master->dev,
mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_tx_ch_name);
if (IS_ERR(mcspi_dma->dma_tx)) {
ret = PTR_ERR(mcspi_dma->dma_tx);
@ -1015,20 +1007,40 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
mcspi_dma->dma_rx = NULL;
}
init_completion(&mcspi_dma->dma_rx_completion);
init_completion(&mcspi_dma->dma_tx_completion);
no_dma:
return ret;
}
static void omap2_mcspi_release_dma(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_dma *mcspi_dma;
int i;
for (i = 0; i < master->num_chipselect; i++) {
mcspi_dma = &mcspi->dma_channels[i];
if (mcspi_dma->dma_rx) {
dma_release_channel(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
}
if (mcspi_dma->dma_tx) {
dma_release_channel(mcspi_dma->dma_tx);
mcspi_dma->dma_tx = NULL;
}
}
}
static int omap2_mcspi_setup(struct spi_device *spi)
{
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
@ -1053,13 +1065,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
}
}
if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
ret = omap2_mcspi_request_dma(spi);
if (ret)
dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n",
ret);
}
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(mcspi->dev);
@ -1076,12 +1081,8 @@ static int omap2_mcspi_setup(struct spi_device *spi)
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs;
mcspi = spi_master_get_devdata(spi->master);
if (spi->controller_state) {
/* Unlink controller state from context save list */
cs = spi->controller_state;
@ -1090,19 +1091,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
kfree(cs);
}
if (spi->chip_select < spi->master->num_chipselect) {
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (mcspi_dma->dma_rx) {
dma_release_channel(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
}
if (mcspi_dma->dma_tx) {
dma_release_channel(mcspi_dma->dma_tx);
mcspi_dma->dma_tx = NULL;
}
}
if (gpio_is_valid(spi->cs_gpio))
gpio_free(spi->cs_gpio);
}
@ -1313,6 +1301,9 @@ static bool omap2_mcspi_can_dma(struct spi_master *master,
if (spi_controller_is_slave(master))
return true;
master->dma_rx = mcspi_dma->dma_rx;
master->dma_tx = mcspi_dma->dma_tx;
return (xfer->len >= DMA_MIN_BYTES);
}
@ -1475,6 +1466,11 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
for (i = 0; i < master->num_chipselect; i++) {
sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
status = omap2_mcspi_request_dma(mcspi,
&mcspi->dma_channels[i]);
if (status == -EPROBE_DEFER)
goto free_master;
}
status = platform_get_irq(pdev, 0);
@ -1512,6 +1508,7 @@ disable_pm:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_master:
omap2_mcspi_release_dma(master);
spi_master_put(master);
return status;
}
@ -1521,6 +1518,8 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
omap2_mcspi_release_dma(master);
pm_runtime_dont_use_autosuspend(mcspi->dev);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);


@ -68,6 +68,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define LPSS_CAPS_CS_EN_SHIFT 9
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
#define LPSS_PRIV_CLOCK_GATE 0x38
#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
unsigned offset;
@ -84,6 +88,8 @@ struct lpss_config {
unsigned cs_sel_shift;
unsigned cs_sel_mask;
unsigned cs_num;
/* Quirks */
unsigned cs_clk_stays_gated : 1;
};
/* Keep these sorted with enum pxa_ssp_type */
@ -154,6 +160,7 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_hi = 56,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
.cs_clk_stays_gated = true,
},
};
@ -381,6 +388,22 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
else
value |= LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
if (config->cs_clk_stays_gated) {
u32 clkgate;
/*
* Changing CS alone when dynamic clock gating is on won't
* actually flip CS at that time. This ruins SPI transfers
* that specify delays, or have no data. Toggle the clock mode
* to force on briefly to poke the CS pin to move.
*/
clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
__lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
__lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
}
}
static void cs_assert(struct spi_device *spi)


@ -1217,6 +1217,11 @@ static int spi_qup_suspend(struct device *device)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;
if (pm_runtime_suspended(device)) {
ret = spi_qup_pm_resume_runtime(device);
if (ret)
return ret;
}
ret = spi_master_suspend(master);
if (ret)
return ret;
@ -1225,10 +1230,8 @@ static int spi_qup_suspend(struct device *device)
if (ret)
return ret;
if (!pm_runtime_suspended(device)) {
clk_disable_unprepare(controller->cclk);
clk_disable_unprepare(controller->iclk);
}
clk_disable_unprepare(controller->cclk);
clk_disable_unprepare(controller->iclk);
return 0;
}


@ -401,9 +401,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
/* Dummy generic FIFO entry */
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
/* Manually start the generic FIFO command */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |


@ -2452,7 +2452,7 @@ int spi_register_controller(struct spi_controller *ctlr)
if (ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
return status;
goto free_bus_id;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
@ -2462,7 +2462,7 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Legacy code path for GPIOs from DT */
status = of_spi_get_gpio_numbers(ctlr);
if (status)
return status;
goto free_bus_id;
}
}
@ -2470,17 +2470,14 @@ int spi_register_controller(struct spi_controller *ctlr)
* Even if it's just one always-selected device, there must
* be at least one chipselect.
*/
if (!ctlr->num_chipselect)
return -EINVAL;
if (!ctlr->num_chipselect) {
status = -EINVAL;
goto free_bus_id;
}
status = device_add(&ctlr->dev);
if (status < 0) {
/* free bus id */
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
goto done;
}
if (status < 0)
goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev));
@ -2496,11 +2493,7 @@ int spi_register_controller(struct spi_controller *ctlr)
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
goto done;
goto free_bus_id;
}
}
/* add statistics */
@ -2515,7 +2508,12 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
done:
return status;
free_bus_id:
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);


@ -19,6 +19,7 @@
#include <signal.h>
#define MAX_NUM_DEVICES 10
#define MAX_SYSFS_PREFIX 0x80
#define MAX_SYSFS_PATH 0x200
#define CSV_MAX_LINE 0x1000
#define SYSFS_MAX_INT 0x20
@ -67,7 +68,7 @@ struct loopback_results {
};
struct loopback_device {
char name[MAX_SYSFS_PATH];
char name[MAX_STR_LEN];
char sysfs_entry[MAX_SYSFS_PATH];
char debugfs_entry[MAX_SYSFS_PATH];
struct loopback_results results;
@ -93,8 +94,8 @@ struct loopback_test {
int stop_all;
int poll_count;
char test_name[MAX_STR_LEN];
char sysfs_prefix[MAX_SYSFS_PATH];
char debugfs_prefix[MAX_SYSFS_PATH];
char sysfs_prefix[MAX_SYSFS_PREFIX];
char debugfs_prefix[MAX_SYSFS_PREFIX];
struct timespec poll_timeout;
struct loopback_device devices[MAX_NUM_DEVICES];
struct loopback_results aggregate_results;
@ -637,7 +638,7 @@ baddir:
static int open_poll_files(struct loopback_test *t)
{
struct loopback_device *dev;
char buf[MAX_STR_LEN];
char buf[MAX_SYSFS_PATH + MAX_STR_LEN];
char dummy;
int fds_idx = 0;
int i;
@ -655,7 +656,7 @@ static int open_poll_files(struct loopback_test *t)
goto err;
}
read(t->fds[fds_idx].fd, &dummy, 1);
t->fds[fds_idx].events = EPOLLERR|EPOLLPRI;
t->fds[fds_idx].events = POLLERR | POLLPRI;
t->fds[fds_idx].revents = 0;
fds_idx++;
}
@ -748,7 +749,7 @@ static int wait_for_complete(struct loopback_test *t)
}
for (i = 0; i < t->poll_count; i++) {
if (t->fds[i].revents & EPOLLPRI) {
if (t->fds[i].revents & POLLPRI) {
/* Dummy read to clear the event */
read(t->fds[i].fd, &dummy, 1);
number_of_events++;
@ -907,10 +908,10 @@ int main(int argc, char *argv[])
t.iteration_max = atoi(optarg);
break;
case 'S':
snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
break;
case 'D':
snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
break;
case 'm':
t.mask = atol(optarg);
@ -961,10 +962,10 @@ int main(int argc, char *argv[])
}
if (!strcmp(t.sysfs_prefix, ""))
snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);
snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix);
if (!strcmp(t.debugfs_prefix, ""))
snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);
snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix);
ret = find_loopback_devices(&t);
if (ret)


@ -38,6 +38,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */


@ -561,7 +561,7 @@ static u_long get_word(struct vc_data *vc)
return 0;
} else if (tmpx < vc->vc_cols - 2 &&
(ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) &&
get_char(vc, (u_short *)&tmp_pos + 1, &temp) > SPACE) {
get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE) {
tmp_pos += 2;
tmpx++;
} else {
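
The speakup fix drops a stray '&': the intent is to advance one u_short within the screen buffer whose position is held in tmp_pos, not to step past the local variable itself. A small illustrative program (names and values are made up) showing how far apart the two casts land:

	#include <stdio.h>

	typedef unsigned short u_short;

	int main(void)
	{
		u_short screen[4] = { 'a', 'b', 'c', 'd' };
		unsigned long tmp_pos = (unsigned long)screen;  /* position kept as an integer, as in get_word() */

		u_short *right = (u_short *)tmp_pos + 1;   /* next cell in the screen buffer -> 'b' */
		u_short *wrong = (u_short *)&tmp_pos + 1;  /* one u_short past the start of the local variable */

		printf("right: %c\n", (char)*right);
		/* 'wrong' points into the stack slot of tmp_pos, not at screen data */
		printf("wrong is %td bytes after &tmp_pos\n", (char *)wrong - (char *)&tmp_pos);
		return 0;
	}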


@ -2731,9 +2731,11 @@ static int compat_tty_tiocgserial(struct tty_struct *tty,
struct serial_struct32 v32;
struct serial_struct v;
int err;
memset(&v, 0, sizeof(struct serial_struct));
if (!tty->ops->set_serial)
memset(&v, 0, sizeof(v));
memset(&v32, 0, sizeof(v32));
if (!tty->ops->get_serial)
return -ENOTTY;
err = tty->ops->get_serial(tty, &v);
if (!err) {


@ -896,10 +896,10 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
ss->xmit_fifo_size = acm->writesize;
ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
ss->close_delay = acm->port.close_delay / 10;
ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
acm->port.closing_wait / 10;
jiffies_to_msecs(acm->port.closing_wait) / 10;
return 0;
}
@ -907,24 +907,32 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
struct acm *acm = tty->driver_data;
unsigned int closing_wait, close_delay;
unsigned int old_closing_wait, old_close_delay;
int retval = 0;
close_delay = ss->close_delay * 10;
close_delay = msecs_to_jiffies(ss->close_delay * 10);
closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
ASYNC_CLOSING_WAIT_NONE :
msecs_to_jiffies(ss->closing_wait * 10);
/* we must redo the rounding here, so that the values match */
old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
jiffies_to_msecs(acm->port.closing_wait) / 10;
mutex_lock(&acm->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
if ((close_delay != acm->port.close_delay) ||
(closing_wait != acm->port.closing_wait))
if ((ss->close_delay != old_close_delay) ||
(ss->closing_wait != old_closing_wait)) {
if (!capable(CAP_SYS_ADMIN))
retval = -EPERM;
else
retval = -EOPNOTSUPP;
} else {
acm->port.close_delay = close_delay;
acm->port.closing_wait = closing_wait;
}
else {
acm->port.close_delay = close_delay;
acm->port.closing_wait = closing_wait;
}
} else
retval = -EOPNOTSUPP;
mutex_unlock(&acm->port.mutex);
return retval;
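
TIOCSSERIAL expresses close_delay and closing_wait in centiseconds while tty_port stores jiffies, so the driver now converts with msecs_to_jiffies()/jiffies_to_msecs() and re-derives the old values through the same rounding before comparing, which is what keeps unprivileged callers from getting a spurious -EPERM. A minimal user-space sketch of that round-trip, with simplified helpers standing in for the kernel conversions and HZ assumed to be 250:

	#include <stdio.h>

	#define HZ 250  /* assumed tick rate for this sketch */

	/* simplified stand-ins for the kernel helpers */
	static unsigned int jiffies_to_msecs(unsigned long j)  { return j * 1000 / HZ; }
	static unsigned long msecs_to_jiffies(unsigned int ms) { return (ms * HZ + 999) / 1000; }

	int main(void)
	{
		unsigned int user_cs = 55;                             /* close_delay from user, centiseconds */
		unsigned long stored = msecs_to_jiffies(user_cs * 10); /* what tty_port would keep, in jiffies */
		unsigned int reported = jiffies_to_msecs(stored) / 10; /* what get_serial_info() reports back */

		/* comparing user_cs against 'reported' (not against 'stored') means a
		 * difference that is only due to rounding is not treated as a change */
		printf("user=%u stored=%lu jiffies reported=%u\n", user_cs, stored, reported);
		return 0;
	}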


@ -378,6 +378,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
/* Generic RTL8153 based ethernet adapters */
{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },


@ -135,7 +135,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_AMD_PLL_FIX;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
(pdev->device == 0x15e0 ||
(pdev->device == 0x145c ||
pdev->device == 0x15e0 ||
pdev->device == 0x15e1 ||
pdev->device == 0x43bb))
xhci->quirks |= XHCI_SUSPEND_DELAY;


@ -445,6 +445,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = &xhci_plat_pm_ops,


@ -289,23 +289,12 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
),
TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
__entry->epnum, __entry->dir_in ? "in" : "out",
({ char *s;
switch (__entry->type) {
case USB_ENDPOINT_XFER_INT:
s = "intr";
break;
case USB_ENDPOINT_XFER_CONTROL:
s = "control";
break;
case USB_ENDPOINT_XFER_BULK:
s = "bulk";
break;
case USB_ENDPOINT_XFER_ISOC:
s = "isoc";
break;
default:
s = "UNKNOWN";
} s; }), __entry->urb, __entry->pipe, __entry->slot_id,
__print_symbolic(__entry->type,
{ USB_ENDPOINT_XFER_INT, "intr" },
{ USB_ENDPOINT_XFER_CONTROL, "control" },
{ USB_ENDPOINT_XFER_BULK, "bulk" },
{ USB_ENDPOINT_XFER_ISOC, "isoc" }),
__entry->urb, __entry->pipe, __entry->slot_id,
__entry->actual, __entry->length, __entry->num_mapped_sgs,
__entry->num_sgs, __entry->stream, __entry->flags
)


@ -1183,6 +1183,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff), /* Telit ME910G1 (ECM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),


@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },


@ -124,6 +124,7 @@
#define HP_LM920_PRODUCT_ID 0x026b
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LD381_PRODUCT_ID 0x0f7f
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524


@ -273,6 +273,9 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
return;
dp = typec_altmode_get_drvdata(alt);
if (!dp)
return;
dp->data.conf = 0;
dp->data.status = 0;
dp->initialized = false;
@ -287,6 +290,8 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
struct typec_altmode *alt;
struct ucsi_dp *dp;
mutex_lock(&con->lock);
/* We can't rely on the firmware with the capabilities. */
desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
@ -295,12 +300,15 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
desc->vdo |= all_assignments << 16;
alt = typec_port_register_altmode(con->port, desc);
if (IS_ERR(alt))
if (IS_ERR(alt)) {
mutex_unlock(&con->lock);
return alt;
}
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
if (!dp) {
typec_unregister_altmode(alt);
mutex_unlock(&con->lock);
return ERR_PTR(-ENOMEM);
}
@ -313,5 +321,7 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
alt->ops = &ucsi_displayport_ops;
typec_altmode_set_drvdata(alt, dp);
mutex_unlock(&con->lock);
return alt;
}


@ -313,6 +313,8 @@ static int process_msg(void)
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;
req->body = state.body;
/* write body, then update state */
virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
} else
@ -395,6 +397,8 @@ static int process_writes(void)
if (state.req->state == xb_req_state_aborted)
kfree(state.req);
else {
/* write err, then update state */
virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}


@ -191,8 +191,11 @@ static bool xenbus_ok(void)
static bool test_reply(struct xb_req_data *req)
{
if (req->state == xb_req_state_got_reply || !xenbus_ok())
if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
/* read req->state before all other fields */
virt_rmb();
return true;
}
/* Make sure to reread req->state each time. */
barrier();
@ -202,7 +205,7 @@ static bool test_reply(struct xb_req_data *req)
static void *read_reply(struct xb_req_data *req)
{
while (req->state != xb_req_state_got_reply) {
do {
wait_event(req->wq, test_reply(req));
if (!xenbus_ok())
@ -216,7 +219,7 @@ static void *read_reply(struct xb_req_data *req)
if (req->err)
return ERR_PTR(req->err);
}
} while (req->state != xb_req_state_got_reply);
return req->body;
}
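
Both xenbus hunks pair a producer-side write barrier (body or err written, then req->state) with a consumer-side read barrier (state checked, then the fields read). A hedged user-space analogue of that publish/consume ordering using C11 release/acquire atomics; the struct and field names below are illustrative, not the kernel's:

	#include <stdatomic.h>
	#include <stdio.h>

	struct req {
		int body;                 /* payload published by the producer */
		_Atomic int got_reply;    /* stands in for xb_req_state_got_reply */
	};

	static void producer(struct req *r)
	{
		r->body = 42;                                 /* write body ... */
		atomic_store_explicit(&r->got_reply, 1,
				      memory_order_release); /* ... then update state (virt_wmb + store) */
	}

	static int consumer(struct req *r)
	{
		while (!atomic_load_explicit(&r->got_reply,
					     memory_order_acquire)) /* read state first (virt_rmb after the check) */
			;                                          /* spin; the kernel sleeps on req->wq instead */
		return r->body;                                    /* body is guaranteed visible here */
	}

	int main(void)
	{
		struct req r = { .body = 0, .got_reply = 0 };

		producer(&r);
		printf("body=%d\n", consumer(&r));
		return 0;
	}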


@ -10142,6 +10142,10 @@ out_fail:
ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
if (ret)
commit_transaction = true;
} else if (sync_log) {
mutex_lock(&root->log_mutex);
list_del(&ctx.list);
mutex_unlock(&root->log_mutex);
}
if (commit_transaction) {
ret = btrfs_commit_transaction(trans);


@ -324,6 +324,8 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
if (full_path == NULL)
goto cdda_exit;
convert_delimiter(full_path, '\\');
cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
if (!cifs_sb_master_tlink(cifs_sb)) {


@ -530,6 +530,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
if (tcon->seal)
seq_puts(s, ",seal");
else if (tcon->ses->server->ignore_signature)
seq_puts(s, ",signloosely");
if (tcon->nocase)
seq_puts(s, ",nocase");
if (tcon->local_lease)


@ -1173,7 +1173,8 @@ try_again:
rc = posix_lock_file(file, flock, NULL);
up_write(&cinode->lock_sem);
if (rc == FILE_LOCK_DEFERRED) {
rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
rc = wait_event_interruptible(flock->fl_wait,
list_empty(&flock->fl_blocked_member));
if (!rc)
goto try_again;
locks_delete_block(flock);


@ -3252,7 +3252,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
if (rc)
goto out;
if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
rc = -EINVAL;
goto out;
}


@ -1882,9 +1882,9 @@ fetch_events:
waiter = true;
init_waitqueue_entry(&wait, current);
spin_lock_irq(&ep->wq.lock);
write_lock_irq(&ep->lock);
__add_wait_queue_exclusive(&ep->wq, &wait);
spin_unlock_irq(&ep->wq.lock);
write_unlock_irq(&ep->lock);
}
for (;;) {
@ -1933,9 +1933,9 @@ send_events:
goto fetch_events;
if (waiter) {
spin_lock_irq(&ep->wq.lock);
write_lock_irq(&ep->lock);
__remove_wait_queue(&ep->wq, &wait);
spin_unlock_irq(&ep->wq.lock);
write_unlock_irq(&ep->lock);
}
return res;


@ -138,6 +138,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
inode->i_fop = &no_open_fops;


@ -725,7 +725,6 @@ static void __locks_delete_block(struct file_lock *waiter)
{
locks_delete_global_blocked(waiter);
list_del_init(&waiter->fl_blocked_member);
waiter->fl_blocker = NULL;
}
static void __locks_wake_up_blocks(struct file_lock *blocker)
@ -740,6 +739,13 @@ static void __locks_wake_up_blocks(struct file_lock *blocker)
waiter->fl_lmops->lm_notify(waiter);
else
wake_up(&waiter->fl_wait);
/*
* The setting of fl_blocker to NULL marks the "done"
* point in deleting a block. Paired with acquire at the top
* of locks_delete_block().
*/
smp_store_release(&waiter->fl_blocker, NULL);
}
}
@ -754,24 +760,41 @@ int locks_delete_block(struct file_lock *waiter)
int status = -ENOENT;
/*
* If fl_blocker is NULL, it won't be set again as this thread
* "owns" the lock and is the only one that might try to claim
* the lock. So it is safe to test fl_blocker locklessly.
* Also if fl_blocker is NULL, this waiter is not listed on
* fl_blocked_requests for some lock, so no other request can
* be added to the list of fl_blocked_requests for this
* request. So if fl_blocker is NULL, it is safe to
* locklessly check if fl_blocked_requests is empty. If both
* of these checks succeed, there is no need to take the lock.
* If fl_blocker is NULL, it won't be set again as this thread "owns"
* the lock and is the only one that might try to claim the lock.
*
* We use acquire/release to manage fl_blocker so that we can
* optimize away taking the blocked_lock_lock in many cases.
*
* The smp_load_acquire guarantees two things:
*
* 1/ that fl_blocked_requests can be tested locklessly. If something
* was recently added to that list it must have been in a locked region
* *before* the locked region when fl_blocker was set to NULL.
*
* 2/ that no other thread is accessing 'waiter', so it is safe to free
* it. __locks_wake_up_blocks is careful not to touch waiter after
* fl_blocker is released.
*
* If a lockless check of fl_blocker shows it to be NULL, we know that
* no new locks can be inserted into its fl_blocked_requests list, and
* can avoid doing anything further if the list is empty.
*/
if (waiter->fl_blocker == NULL &&
if (!smp_load_acquire(&waiter->fl_blocker) &&
list_empty(&waiter->fl_blocked_requests))
return status;
spin_lock(&blocked_lock_lock);
if (waiter->fl_blocker)
status = 0;
__locks_wake_up_blocks(waiter);
__locks_delete_block(waiter);
/*
* The setting of fl_blocker to NULL marks the "done" point in deleting
* a block. Paired with acquire at the top of this function.
*/
smp_store_release(&waiter->fl_blocker, NULL);
spin_unlock(&blocked_lock_lock);
return status;
}
@ -1364,7 +1387,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
error = posix_lock_inode(inode, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
error = wait_event_interruptible(fl->fl_wait,
list_empty(&fl->fl_blocked_member));
if (error)
break;
}
@ -1449,7 +1473,8 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
error = posix_lock_inode(inode, &fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
error = wait_event_interruptible(fl.fl_wait,
list_empty(&fl.fl_blocked_member));
if (!error) {
/*
* If we've been sleeping someone might have
@ -1652,7 +1677,8 @@ restart:
locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_blocker, break_time);
list_empty(&new_fl->fl_blocked_member),
break_time);
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
@ -2136,7 +2162,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
error = flock_lock_inode(inode, fl);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
error = wait_event_interruptible(fl->fl_wait,
list_empty(&fl->fl_blocked_member));
if (error)
break;
}
@ -2413,7 +2440,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
error = vfs_lock_file(filp, cmd, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
error = wait_event_interruptible(fl->fl_wait,
list_empty(&fl->fl_blocked_member));
if (error)
break;
}
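
The comment added to locks_delete_block() describes the restored optimization: an acquire load of fl_blocker, combined with an empty fl_blocked_requests list, lets the common case return without taking the global blocked_lock_lock, while the waker publishes completion with a release store of NULL. A hedged C11/pthread analogue of that shape (toy types and names, not the kernel primitives):

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t blocked_lock_lock = PTHREAD_MUTEX_INITIALIZER;

	struct waiter {
		_Atomic(struct waiter *) fl_blocker;  /* non-NULL while still blocked on someone */
		int                      queued;      /* stands in for the fl_blocked_requests list */
	};

	/* waker side: finish touching 'w', then publish "done" with a release store */
	static void wake_up_block(struct waiter *w)
	{
		w->queued = 0;
		atomic_store_explicit(&w->fl_blocker, NULL, memory_order_release);
	}

	/* waiter side: the acquire load lets the common case skip blocked_lock_lock */
	static int delete_block(struct waiter *w)
	{
		int status = -1;

		if (!atomic_load_explicit(&w->fl_blocker, memory_order_acquire) && !w->queued)
			return status;                /* fast path: nothing left to clean up */

		pthread_mutex_lock(&blocked_lock_lock);
		if (atomic_load_explicit(&w->fl_blocker, memory_order_relaxed))
			status = 0;
		w->queued = 0;
		atomic_store_explicit(&w->fl_blocker, NULL, memory_order_release);
		pthread_mutex_unlock(&blocked_lock_lock);
		return status;
	}

	int main(void)
	{
		struct waiter self = { .queued = 1 };
		struct waiter blocker;

		atomic_store(&self.fl_blocker, &blocker);
		wake_up_block(&self);                        /* as if the blocker dropped its lock */
		printf("status=%d\n", delete_block(&self));  /* fast path, global lock not taken */
		return 0;
	}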


@ -698,6 +698,7 @@ struct inode {
struct rcu_head i_rcu;
};
atomic64_t i_version;
atomic64_t i_sequence; /* see futex */
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;


@ -31,23 +31,26 @@ struct task_struct;
union futex_key {
struct {
u64 i_seq;
unsigned long pgoff;
struct inode *inode;
int offset;
unsigned int offset;
} shared;
struct {
union {
struct mm_struct *mm;
u64 __tmp;
};
unsigned long address;
struct mm_struct *mm;
int offset;
unsigned int offset;
} private;
struct {
u64 ptr;
unsigned long word;
void *ptr;
int offset;
unsigned int offset;
} both;
};
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
#ifdef CONFIG_FUTEX
enum {


@ -311,7 +311,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
__SETPAGEFLAG(Referenced, referenced, PF_HEAD)


@ -126,8 +126,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
void vmalloc_sync_all(void);
void vmalloc_sync_mappings(void);
void vmalloc_sync_unmappings(void);
/*
* Lowlevel-APIs (not for driver use!)
*/


@ -385,9 +385,9 @@ static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
*/
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
u32 hash = jhash2((u32*)&key->both.word,
(sizeof(key->both.word)+sizeof(key->both.ptr))/4,
u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
key->both.offset);
return &futex_queues[hash & (futex_hashsize - 1)];
}
@ -429,7 +429,7 @@ static void get_futex_key_refs(union futex_key *key)
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
ihold(key->shared.inode); /* implies smp_mb(); (B) */
smp_mb(); /* explicit smp_mb(); (B) */
break;
case FUT_OFF_MMSHARED:
futex_get_mm(key); /* implies smp_mb(); (B) */
@ -463,7 +463,6 @@ static void drop_futex_key_refs(union futex_key *key)
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
iput(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
mmdrop(key->private.mm);
@ -505,6 +504,46 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
return timeout;
}
/*
* Generate a machine wide unique identifier for this inode.
*
* This relies on u64 not wrapping in the life-time of the machine; which with
* 1ns resolution means almost 585 years.
*
* This further relies on the fact that a well formed program will not unmap
* the file while it has a (shared) futex waiting on it. This mapping will have
* a file reference which pins the mount and inode.
*
* If for some reason an inode gets evicted and read back in again, it will get
* a new sequence number and will _NOT_ match, even though it is the exact same
* file.
*
* It is important that match_futex() will never have a false-positive, esp.
* for PI futexes that can mess up the state. The above argues that false-negatives
* are only possible for malformed programs.
*/
static u64 get_inode_sequence_number(struct inode *inode)
{
static atomic64_t i_seq;
u64 old;
/* Does the inode already have a sequence number? */
old = atomic64_read(&inode->i_sequence);
if (likely(old))
return old;
for (;;) {
u64 new = atomic64_add_return(1, &i_seq);
if (WARN_ON_ONCE(!new))
continue;
old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
if (old)
return old;
return new;
}
}
/**
* get_futex_key() - Get parameters which are the keys for a futex
* @uaddr: virtual address of the futex
@ -517,9 +556,15 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
*
* The key words are stored in @key on success.
*
* For shared mappings, it's (page->index, file_inode(vma->vm_file),
* offset_within_page). For private mappings, it's (uaddr, current->mm).
* We can usually work out the index without swapping in the page.
* For shared mappings (when @fshared), the key is:
* ( inode->i_sequence, page->index, offset_within_page )
* [ also see get_inode_sequence_number() ]
*
* For private mappings (or when !@fshared), the key is:
* ( current->mm, address, 0 )
*
* This allows (cross process, where applicable) identification of the futex
* without keeping the page pinned for the duration of the FUTEX_WAIT.
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
@ -659,8 +704,6 @@ again:
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key); /* implies smp_mb(); (B) */
} else {
struct inode *inode;
@ -692,40 +735,14 @@ again:
goto again;
}
/*
* Take a reference unless it is about to be freed. Previously
* this reference was taken by ihold under the page lock
* pinning the inode in place so i_lock was unnecessary. The
* only way for this check to fail is if the inode was
* truncated in parallel which is almost certainly an
* application bug. In such a case, just retry.
*
* We are not calling into get_futex_key_refs() in file-backed
* cases, therefore a successful atomic_inc return below will
* guarantee that get_futex_key() will still imply smp_mb(); (B).
*/
if (!atomic_inc_not_zero(&inode->i_count)) {
rcu_read_unlock();
put_page(page);
goto again;
}
/* Should be impossible but lets be paranoid for now */
if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
err = -EFAULT;
rcu_read_unlock();
iput(inode);
goto out;
}
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.inode = inode;
key->shared.i_seq = get_inode_sequence_number(inode);
key->shared.pgoff = basepage_index(tail);
rcu_read_unlock();
}
get_futex_key_refs(key); /* implies smp_mb(); (B) */
out:
put_page(page);
return err;
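
get_futex_key() now keys shared futexes on (inode->i_sequence, page->index, offset) instead of pinning the inode, and get_inode_sequence_number() assigns that sequence lazily: bump a global counter, then install the value with a cmpxchg so every racing caller ends up seeing the same nonzero number. A hedged user-space sketch of that assign-once idiom with C11 atomics (illustrative names, no kernel types):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint64_t i_seq;                  /* machine-wide counter, 0 is never handed out */

	struct inode { _Atomic uint64_t i_sequence; };  /* starts at 0 = "no sequence yet" */

	static uint64_t get_inode_sequence_number(struct inode *inode)
	{
		uint64_t old = atomic_load(&inode->i_sequence);

		if (old)                                /* common case: already assigned */
			return old;

		for (;;) {
			uint64_t zero = 0;
			uint64_t new = atomic_fetch_add(&i_seq, 1) + 1;

			if (!new)                       /* u64 wrap is not expected in practice */
				continue;
			/* install 'new' only if nobody beat us to it; either way, every
			 * caller returns the same value for this inode from now on */
			if (atomic_compare_exchange_strong(&inode->i_sequence, &zero, new))
				return new;
			return zero;                    /* 'zero' now holds the winner's value */
		}
	}

	int main(void)
	{
		struct inode a = { 0 }, b = { 0 };

		printf("a=%llu a=%llu b=%llu\n",
		       (unsigned long long)get_inode_sequence_number(&a),
		       (unsigned long long)get_inode_sequence_number(&a),
		       (unsigned long long)get_inode_sequence_number(&b));
		return 0;
	}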


@ -554,7 +554,7 @@ NOKPROBE_SYMBOL(notify_die);
int register_die_notifier(struct notifier_block *nb)
{
vmalloc_sync_all();
vmalloc_sync_mappings();
return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);


@ -335,12 +335,14 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
}
page = pmd_page(orig_pmd);
/* Do not interfere with other mappings of this page */
if (page_mapcount(page) != 1)
goto huge_unlock;
if (next - addr != HPAGE_PMD_SIZE) {
int err;
if (page_mapcount(page) != 1)
goto huge_unlock;
get_page(page);
spin_unlock(ptl);
lock_page(page);
@ -426,6 +428,10 @@ regular_page:
continue;
}
/* Do not interfere with other mappings of this page */
if (page_mapcount(page) != 1)
continue;
VM_BUG_ON_PAGE(PageTransCompound(page), page);
if (pte_young(ptent)) {


@ -2414,28 +2414,41 @@ static void high_work_func(struct work_struct *work)
#define MEMCG_DELAY_SCALING_SHIFT 14
/*
* Scheduled by try_charge() to be executed from the userland return path
* and reclaims memory over the high limit.
* Get the number of jiffies that we should penalise a mischievous cgroup which
* is exceeding its memory.high by checking both it and its ancestors.
*/
void mem_cgroup_handle_over_high(void)
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
unsigned int nr_pages)
{
unsigned long usage, high, clamped_high;
unsigned long pflags;
unsigned long penalty_jiffies, overage;
unsigned int nr_pages = current->memcg_nr_pages_over_high;
struct mem_cgroup *memcg;
unsigned long penalty_jiffies;
u64 max_overage = 0;
if (likely(!nr_pages))
return;
do {
unsigned long usage, high;
u64 overage;
memcg = get_mem_cgroup_from_mm(current->mm);
reclaim_high(memcg, nr_pages, GFP_KERNEL);
current->memcg_nr_pages_over_high = 0;
usage = page_counter_read(&memcg->memory);
high = READ_ONCE(memcg->high);
/*
* Prevent division by 0 in overage calculation by acting as if
* it was a threshold of 1 page
*/
high = max(high, 1UL);
overage = usage - high;
overage <<= MEMCG_DELAY_PRECISION_SHIFT;
overage = div64_u64(overage, high);
if (overage > max_overage)
max_overage = overage;
} while ((memcg = parent_mem_cgroup(memcg)) &&
!mem_cgroup_is_root(memcg));
if (!max_overage)
return 0;
/*
* memory.high is breached and reclaim is unable to keep up. Throttle
* allocators proactively to slow down excessive growth.
*
* We use overage compared to memory.high to calculate the number of
* jiffies to sleep (penalty_jiffies). Ideally this value should be
* fairly lenient on small overages, and increasingly harsh when the
@ -2443,24 +2456,9 @@ void mem_cgroup_handle_over_high(void)
* its crazy behaviour, so we exponentially increase the delay based on
* overage amount.
*/
usage = page_counter_read(&memcg->memory);
high = READ_ONCE(memcg->high);
if (usage <= high)
goto out;
/*
* Prevent division by 0 in overage calculation by acting as if it was a
* threshold of 1 page
*/
clamped_high = max(high, 1UL);
overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
clamped_high);
penalty_jiffies = ((u64)overage * overage * HZ)
>> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
penalty_jiffies = max_overage * max_overage * HZ;
penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
/*
* Factor in the task's own contribution to the overage, such that four
@ -2477,7 +2475,32 @@ void mem_cgroup_handle_over_high(void)
* application moving forwards and also permit diagnostics, albeit
* extremely slowly.
*/
penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
return min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
}
/*
* Scheduled by try_charge() to be executed from the userland return path
* and reclaims memory over the high limit.
*/
void mem_cgroup_handle_over_high(void)
{
unsigned long penalty_jiffies;
unsigned long pflags;
unsigned int nr_pages = current->memcg_nr_pages_over_high;
struct mem_cgroup *memcg;
if (likely(!nr_pages))
return;
memcg = get_mem_cgroup_from_mm(current->mm);
reclaim_high(memcg, nr_pages, GFP_KERNEL);
current->memcg_nr_pages_over_high = 0;
/*
* memory.high is breached and reclaim is unable to keep up. Throttle
* allocators proactively to slow down excessive growth.
*/
penalty_jiffies = calculate_high_delay(memcg, nr_pages);
/*
* Don't sleep if the amount of jiffies this memcg owes us is so low
@ -4151,7 +4174,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
unsigned long usage;
int i, j, size;
int i, j, size, entries;
mutex_lock(&memcg->thresholds_lock);
@ -4171,14 +4194,20 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
__mem_cgroup_threshold(memcg, type == _MEMSWAP);
/* Calculate new number of threshold */
size = 0;
size = entries = 0;
for (i = 0; i < thresholds->primary->size; i++) {
if (thresholds->primary->entries[i].eventfd != eventfd)
size++;
else
entries++;
}
new = thresholds->spare;
/* If no items related to eventfd have been cleared, nothing to do */
if (!entries)
goto unlock;
/* Set thresholds array to NULL if we don't have thresholds */
if (!size) {
kfree(new);
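
In the first memcontrol.c hunk, calculate_high_delay() expresses the worst ancestor's overage as a fixed-point fraction of memory.high and then squares it, so small breaches cost little and large ones are throttled hard: overage = ((usage - high) << PRECISION_SHIFT) / high, penalty = overage * overage * HZ >> (PRECISION_SHIFT + SCALING_SHIFT), clamped to MEMCG_MAX_HIGH_DELAY_JIFFIES. A worked example as a stand-alone program; PRECISION_SHIFT, HZ and the clamp are assumed values here, since only SCALING_SHIFT appears in the hunk:

	#include <stdint.h>
	#include <stdio.h>

	#define MEMCG_DELAY_PRECISION_SHIFT	20	/* assumed; not shown in the hunk above */
	#define MEMCG_DELAY_SCALING_SHIFT	14
	#define HZ				1000	/* assumed tick rate for the example */
	#define MEMCG_MAX_HIGH_DELAY_JIFFIES	(2 * HZ)	/* assumed clamp */

	static unsigned long calculate_high_delay(uint64_t usage, uint64_t high)
	{
		uint64_t overage, penalty;

		if (usage <= high)
			return 0;
		if (high < 1)				/* avoid division by zero, as in the patch */
			high = 1;

		overage = ((usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high;
		penalty = overage * overage * HZ;
		penalty >>= MEMCG_DELAY_PRECISION_SHIFT;
		penalty >>= MEMCG_DELAY_SCALING_SHIFT;

		return penalty < MEMCG_MAX_HIGH_DELAY_JIFFIES ?
		       (unsigned long)penalty : MEMCG_MAX_HIGH_DELAY_JIFFIES;
	}

	int main(void)
	{
		/* a 100 MiB limit in 4 KiB pages; 10% and 50% over it */
		printf("10%% over -> %lu jiffies\n", calculate_high_delay(28160, 25600));
		printf("50%% over -> %lu jiffies (clamped)\n", calculate_high_delay(38400, 25600));
		return 0;
	}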


@ -359,10 +359,14 @@ void vm_unmap_aliases(void)
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
* Implement a stub for vmalloc_sync_[un]mapping() if the architecture
* chose not to have one.
*/
void __weak vmalloc_sync_all(void)
void __weak vmalloc_sync_mappings(void)
{
}
void __weak vmalloc_sync_unmappings(void)
{
}


@ -1953,8 +1953,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
if (node == NUMA_NO_NODE)
searchnode = numa_mem_id();
else if (!node_present_pages(node))
searchnode = node_to_mem_node(node);
object = get_partial_node(s, get_node(s, searchnode), c, flags);
if (object || node != NUMA_NO_NODE)
@ -2543,17 +2541,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
struct page *page;
page = c->page;
if (!page)
if (!page) {
/*
* if the node is not online or has no normal memory, just
* ignore the node constraint
*/
if (unlikely(node != NUMA_NO_NODE &&
!node_state(node, N_NORMAL_MEMORY)))
node = NUMA_NO_NODE;
goto new_slab;
}
redo:
if (unlikely(!node_match(page, node))) {
int searchnode = node;
if (node != NUMA_NO_NODE && !node_present_pages(node))
searchnode = node_to_mem_node(node);
if (unlikely(!node_match(page, searchnode))) {
/*
* same as above but node_match() being false already
* implies node != NUMA_NO_NODE
*/
if (!node_state(node, N_NORMAL_MEMORY)) {
node = NUMA_NO_NODE;
goto redo;
} else {
stat(s, ALLOC_NODE_MISMATCH);
deactivate_slab(s, page, c->freelist, c);
goto new_slab;
@ -2977,11 +2985,13 @@ redo:
barrier();
if (likely(page == c->page)) {
set_freepointer(s, tail_obj, c->freelist);
void **freelist = READ_ONCE(c->freelist);
set_freepointer(s, tail_obj, freelist);
if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
c->freelist, tid,
freelist, tid,
head, next_tid(tid)))) {
note_cmpxchg_failure("slab_free", s, tid);


@ -742,6 +742,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
struct mem_section *ms = __pfn_to_section(pfn);
bool section_is_early = early_section(ms);
struct page *memmap = NULL;
bool empty;
unsigned long *subsection_map = ms->usage
? &ms->usage->subsection_map[0] : NULL;
@ -772,7 +773,8 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
* For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
*/
bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION);
if (empty) {
unsigned long section_nr = pfn_to_section_nr(pfn);
/*
@ -787,13 +789,15 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
ms->usage = NULL;
}
memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
ms->section_mem_map = (unsigned long)NULL;
}
if (section_is_early && memmap)
free_map_bootmem(memmap);
else
depopulate_section_memmap(pfn, nr_pages, altmap);
if (empty)
ms->section_mem_map = (unsigned long)NULL;
}
static struct page * __meminit section_activate(int nid, unsigned long pfn,


@ -1259,7 +1259,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
* First make sure the mappings are removed from all page-tables
* before they are freed.
*/
vmalloc_sync_all();
vmalloc_sync_unmappings();
/*
* TODO: to calculate a flush range without looping.
@ -3050,16 +3050,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
EXPORT_SYMBOL(remap_vmalloc_range);
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
* Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose
* not to have one.
*
* The purpose of this function is to make sure the vmalloc area
* mappings are identical in all page-tables in the system.
*/
void __weak vmalloc_sync_all(void)
void __weak vmalloc_sync_mappings(void)
{
}
void __weak vmalloc_sync_unmappings(void)
{
}
static int f(pte_t *pte, unsigned long addr, void *data)
{


@ -48,6 +48,7 @@ KBUILD_CFLAGS += -Wno-initializer-overrides
KBUILD_CFLAGS += -Wno-format
KBUILD_CFLAGS += -Wno-sign-compare
KBUILD_CFLAGS += -Wno-format-zero-length
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
endif
endif


@ -94,7 +94,7 @@ if (defined $opt{'o'}) {
#
while ( <$module_symvers> ) {
chomp;
my (undef, $symbol, $namespace, $module, $gpl) = split('\t');
my (undef, $symbol, $module, $gpl, $namespace) = split('\t');
$SYMBOL { $symbol } = [ $module , "0" , $symbol, $gpl];
}
close($module_symvers);


@ -2441,7 +2441,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
}
/* parse Module.symvers file. line format:
* 0x12345678<tab>symbol<tab>module[[<tab>export]<tab>something]
* 0x12345678<tab>symbol<tab>module<tab>export<tab>namespace
**/
static void read_dump(const char *fname, unsigned int kernel)
{
@ -2454,7 +2454,7 @@ static void read_dump(const char *fname, unsigned int kernel)
return;
while ((line = get_next_line(&pos, file, size))) {
char *symname, *namespace, *modname, *d, *export, *end;
char *symname, *namespace, *modname, *d, *export;
unsigned int crc;
struct module *mod;
struct symbol *s;
@ -2462,16 +2462,16 @@ static void read_dump(const char *fname, unsigned int kernel)
if (!(symname = strchr(line, '\t')))
goto fail;
*symname++ = '\0';
if (!(namespace = strchr(symname, '\t')))
goto fail;
*namespace++ = '\0';
if (!(modname = strchr(namespace, '\t')))
if (!(modname = strchr(symname, '\t')))
goto fail;
*modname++ = '\0';
if ((export = strchr(modname, '\t')) != NULL)
*export++ = '\0';
if (export && ((end = strchr(export, '\t')) != NULL))
*end = '\0';
if (!(export = strchr(modname, '\t')))
goto fail;
*export++ = '\0';
if (!(namespace = strchr(export, '\t')))
goto fail;
*namespace++ = '\0';
crc = strtoul(line, &d, 16);
if (*symname == '\0' || *modname == '\0' || *d != '\0')
goto fail;
@ -2523,9 +2523,9 @@ static void write_dump(const char *fname)
namespace = symbol->namespace;
buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
symbol->crc, symbol->name,
namespace ? namespace : "",
symbol->module->name,
export_str(symbol->export));
export_str(symbol->export),
namespace ? namespace : "");
}
symbol = symbol->next;
}
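
Both script changes above follow the Module.symvers column order that places the namespace last: crc, symbol, module, export, namespace (possibly empty). A hedged stand-alone sketch of parsing one such line the way read_dump() now does, splitting on tabs with strchr(); the sample values are made up:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		/* format: 0x<crc>\t<symbol>\t<module>\t<export>\t<namespace> */
		char line[] = "0x12345678\tmy_symbol\tvmlinux\tEXPORT_SYMBOL_GPL\tMY_NS";
		char *symname, *modname, *export, *namespace, *d;
		unsigned int crc;

		if (!(symname = strchr(line, '\t')))
			return 1;
		*symname++ = '\0';
		if (!(modname = strchr(symname, '\t')))
			return 1;
		*modname++ = '\0';
		if (!(export = strchr(modname, '\t')))
			return 1;
		*export++ = '\0';
		if (!(namespace = strchr(export, '\t')))
			return 1;
		*namespace++ = '\0';

		crc = strtoul(line, &d, 16);
		if (*symname == '\0' || *modname == '\0' || *d != '\0')
			return 1;

		printf("crc=0x%08x sym=%s mod=%s export=%s ns=%s\n",
		       crc, symname, modname, export, namespace);
		return 0;
	}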

scripts/parse-maintainers.pl Normal file → Executable file

@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->next) {
if (plugin->dst_frames)
frames = plugin->dst_frames(plugin, frames);
if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
if ((snd_pcm_sframes_t)frames <= 0)
return -ENXIO;
plugin = plugin->next;
err = snd_pcm_plugin_alloc(plugin, frames);
@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->prev) {
if (plugin->src_frames)
frames = plugin->src_frames(plugin, frames);
if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
if ((snd_pcm_sframes_t)frames <= 0)
return -ENXIO;
plugin = plugin->prev;
err = snd_pcm_plugin_alloc(plugin, frames);
@ -209,6 +209,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
plugin = snd_pcm_plug_last(plug);
while (plugin && drv_frames > 0) {
if (drv_frames > plugin->buf_frames)
drv_frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames)
drv_frames = plugin->src_frames(plugin, drv_frames);
@ -220,6 +222,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
plugin_next = plugin->next;
if (plugin->dst_frames)
drv_frames = plugin->dst_frames(plugin, drv_frames);
if (drv_frames > plugin->buf_frames)
drv_frames = plugin->buf_frames;
plugin = plugin_next;
}
} else
@ -248,11 +252,15 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
if (frames < 0)
return frames;
}
if (frames > plugin->buf_frames)
frames = plugin->buf_frames;
plugin = plugin_next;
}
} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
plugin = snd_pcm_plug_last(plug);
while (plugin) {
if (frames > plugin->buf_frames)
frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames) {
frames = plugin->src_frames(plugin, frames);


@ -602,6 +602,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
snd_midi_event_reset_decode(mdev->coder);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)


@ -81,6 +81,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
continue;
snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
snd_midi_event_reset_decode(vmidi->parser);
} else {
len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
if (len > 0)


@ -8080,6 +8080,8 @@ static int patch_alc269(struct hda_codec *codec)
spec->gen.mixer_nid = 0;
break;
case 0x10ec0225:
codec->power_save_node = 1;
/* fall through */
case 0x10ec0295:
case 0x10ec0299:
spec->codec_variant = ALC269_TYPE_ALC225;
@ -8639,6 +8641,8 @@ enum {
ALC669_FIXUP_ACER_ASPIRE_ETHOS,
ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET,
ALC671_FIXUP_HP_HEADSET_MIC2,
ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
};
static const struct hda_fixup alc662_fixups[] = {
@ -8984,6 +8988,25 @@ static const struct hda_fixup alc662_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc671_fixup_hp_headset_mic2,
},
[ALC662_FIXUP_ACER_X2660G_HEADSET_MODE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x1a, 0x02a1113c }, /* use as headset mic, without its own jack detect */
{ }
},
.chained = true,
.chain_id = ALC662_FIXUP_USI_FUNC
},
[ALC662_FIXUP_ACER_NITRO_HEADSET_MODE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */
{ 0x1b, 0x0221144f },
{ }
},
.chained = true,
.chain_id = ALC662_FIXUP_USI_FUNC
},
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@ -8995,6 +9018,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),


@ -1543,20 +1543,20 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
return ret;
}
ret = devm_snd_soc_register_component(&pdev->dev, &stm32_component,
&sai->cpu_dai_drv, 1);
ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
if (ret) {
dev_err(&pdev->dev, "Could not register pcm dma\n");
return ret;
}
ret = snd_soc_register_component(&pdev->dev, &stm32_component,
&sai->cpu_dai_drv, 1);
if (ret)
return ret;
if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
conf = &stm32_sai_pcm_config_spdif;
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
if (ret) {
dev_err(&pdev->dev, "Could not register pcm dma\n");
return ret;
}
return 0;
}
@ -1565,6 +1565,8 @@ static int stm32_sai_sub_remove(struct platform_device *pdev)
struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev);
clk_unprepare(sai->pdata->pclk);
snd_dmaengine_pcm_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
return 0;
}


@ -305,7 +305,7 @@ static void line6_data_received(struct urb *urb)
line6_midibuf_read(mb, line6->buffer_message,
LINE6_MIDI_MESSAGE_MAXLEN);
if (done == 0)
if (done <= 0)
break;
line6->message_length = done;

Some files were not shown because too many files have changed in this diff.