This is the 5.4.48 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl7wXk8ACgkQONu9yGCS
 aT5uyhAA1EoV9ROPRt8Vw1fzlDIrRA5X2T+FCGXskg2kKWehVHAvge4U76nZ16+i
 aYcBX3lAmN7GGVw+/GiRHf9QpiwOUF5f3ZUQZ0KuLS1gcuaXx+VC1h5yyunx3tm1
 CI01B2p+GQ3jABWopnhsujMVAeWjbD18NqY+a+xOzTn8CCyLAli+LiviWCR/apQp
 p4r6++eevWo1yMDlJGNGoMYsFcxChWhtlnDQKWCsIDCN3I1cinGz8wopiv93WqRH
 Sz3wb1YMuhXb10usNZcZFaSvDGf5XSaMxpRkyNSxN7CLv8LzbovXQOE+fFDGAYxd
 lUCjRK0wFBMzRSeZ2iGYqqQf5xyYKb6hNmViGprdqwR2c3MBHN/Xs5aDLqJEgHkr
 OXzZLyHUngRfp3GpagFGV6q06S6fgb9ca/7FuT4Hn8Z3tb5Xt7b/KlPcW3VymiSt
 I37itASNA/Qs6Njl4tDd9GjwbcOAs+s/XabasU+pXscOkf3o8fYMy2krisy176D/
 AXtRTLq4pc42I8c3tv5uCNz7Zje/qytKSPErNRBAedvOu5JX7ab6hgULPH4N7r0N
 Di/LyKqYw+ZBa4AfzcsvlR3wJLWqni+aFj5yppSrNkH7kNzZGLmlw8xIo8v1CFYw
 T86b13WmHPqvyFWQLpX5WCEYu0OCw5YCUyQXSsLZN5oC7gAwC7U=
 =FSdI
 -----END PGP SIGNATURE-----

Merge 5.4.48 into android-5.4-stable

Changes in 5.4.48
	ACPI: GED: use correct trigger type field in _Exx / _Lxx handling
	drm/amdgpu: fix and cleanup amdgpu_gem_object_close v4
	ath10k: Fix the race condition in firmware dump work queue
	drm: bridge: adv7511: Extend list of audio sample rates
	media: staging: imgu: do not hold spinlock during freeing mmu page table
	media: imx: imx7-mipi-csis: Cleanup and fix subdev pad format handling
	crypto: ccp -- don't "select" CONFIG_DMADEVICES
	media: vicodec: Fix error codes in probe function
	media: si2157: Better check for running tuner in init
	objtool: Ignore empty alternatives
	spi: spi-mem: Fix Dual/Quad modes on Octal-capable devices
	drm/amdgpu: Init data to avoid oops while reading pp_num_states.
	arm64/kernel: Fix range on invalidating dcache for boot page tables
	libbpf: Fix memory leak and possible double-free in hashmap__clear
	spi: pxa2xx: Apply CS clk quirk to BXT
	x86,smap: Fix smap_{save,restore}() alternatives
	sched/fair: Refill bandwidth before scaling
	net: atlantic: make hw_get_regs optional
	net: ena: fix error returning in ena_com_get_hash_function()
	efi/libstub/x86: Work around LLVM ELF quirk build regression
	ath10k: remove the max_sched_scan_reqs value
	arm64: cacheflush: Fix KGDB trap detection
	media: staging: ipu3: Fix stale list entries on parameter queue failure
	rtw88: fix an issue about leak system resources
	spi: dw: Zero DMA Tx and Rx configurations on stack
	ACPICA: Dispatcher: add status checks
	block: alloc map and request for new hardware queue
	arm64: insn: Fix two bugs in encoding 32-bit logical immediates
	block: reset mapping if failed to update hardware queue count
	drm: rcar-du: Set primary plane zpos immutably at initializing
	lockdown: Allow unprivileged users to see lockdown status
	ixgbe: Fix XDP redirect on archs with PAGE_SIZE above 4K
	platform/x86: dell-laptop: don't register micmute LED if there is no token
	MIPS: Loongson: Build ATI Radeon GPU driver as module
	Bluetooth: Add SCO fallback for invalid LMP parameters error
	kgdb: Disable WARN_CONSOLE_UNLOCKED for all kgdb
	kgdb: Prevent infinite recursive entries to the debugger
	pmu/smmuv3: Clear IRQ affinity hint on device removal
	ACPI/IORT: Fix PMCG node single ID mapping handling
	mips: Fix cpu_has_mips64r1/2 activation for MIPS32 CPUs
	spi: dw: Enable interrupts in accordance with DMA xfer mode
	clocksource: dw_apb_timer: Make CPU-affiliation being optional
	clocksource: dw_apb_timer_of: Fix missing clockevent timers
	media: dvbdev: Fix tuner->demod media controller link
	btrfs: account for trans_block_rsv in may_commit_transaction
	btrfs: do not ignore error from btrfs_next_leaf() when inserting checksums
	ARM: 8978/1: mm: make act_mm() respect THREAD_SIZE
	batman-adv: Revert "disable ethtool link speed detection when auto negotiation off"
	ice: Fix memory leak
	ice: Fix for memory leaks and modify ICE_FREE_CQ_BUFS
	mmc: meson-mx-sdio: trigger a soft reset after a timeout or CRC error
	Bluetooth: btmtkuart: Improve exception handling in btmtkuart_probe()
	spi: dw: Fix Rx-only DMA transfers
	x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
	net: vmxnet3: fix possible buffer overflow caused by bad DMA value in vmxnet3_get_rss()
	x86: fix vmap arguments in map_irq_stack
	staging: android: ion: use vmap instead of vm_map_ram
	ath10k: fix kernel null pointer dereference
	media: staging/intel-ipu3: Implement lock for stream on/off operations
	spi: Respect DataBitLength field of SpiSerialBusV2() ACPI resource
	brcmfmac: fix wrong location to get firmware feature
	regulator: qcom-rpmh: Fix typos in pm8150 and pm8150l
	tools api fs: Make xxx__mountpoint() more scalable
	e1000: Distribute switch variables for initialization
	dt-bindings: display: mediatek: control dpi pins mode to avoid leakage
	drm/mediatek: set dpi pin mode to gpio low to avoid leakage current
	audit: fix a net reference leak in audit_send_reply()
	media: dvb: return -EREMOTEIO on i2c transfer failure.
	media: platform: fcp: Set appropriate DMA parameters
	MIPS: Make sparse_init() use top-down allocation
	ath10k: add flush tx packets for SDIO chip
	Bluetooth: btbcm: Add 2 missing models to subver tables
	audit: fix a net reference leak in audit_list_rules_send()
	Drivers: hv: vmbus: Always handle the VMBus messages on CPU0
	dpaa2-eth: fix return codes used in ndo_setup_tc
	netfilter: nft_nat: return EOPNOTSUPP if type or flags are not supported
	selftests/bpf: Fix memory leak in extract_build_id()
	net: bcmgenet: set Rx mode before starting netif
	net: bcmgenet: Fix WoL with password after deep sleep
	lib/mpi: Fix 64-bit MIPS build with Clang
	exit: Move preemption fixup up, move blocking operations down
	sched/core: Fix illegal RCU from offline CPUs
	drivers/perf: hisi: Fix typo in events attribute array
	iocost_monitor: drop string wrap around numbers when outputting json
	net: lpc-enet: fix error return code in lpc_mii_init()
	selinux: fix error return code in policydb_read()
	drivers: net: davinci_mdio: fix potential NULL dereference in davinci_mdio_probe()
	media: cec: silence shift wrapping warning in __cec_s_log_addrs()
	net: allwinner: Use the correct return type for ndo_start_xmit()
	powerpc/spufs: fix copy_to_user while atomic
	libertas_tf: avoid a null dereference in pointer priv
	xfs: clean up the error handling in xfs_swap_extents
	Crypto/chcr: fix for ccm(aes) failed test
	MIPS: Truncate link address into 32bit for 32bit kernel
	mips: cm: Fix an invalid error code of INTVN_*_ERR
	kgdb: Fix spurious true from in_dbg_master()
	xfs: reset buffer write failure state on successful completion
	xfs: fix duplicate verification from xfs_qm_dqflush()
	platform/x86: intel-vbtn: Use acpi_evaluate_integer()
	platform/x86: intel-vbtn: Split keymap into buttons and switches parts
	platform/x86: intel-vbtn: Do not advertise switches to userspace if they are not there
	platform/x86: intel-vbtn: Also handle tablet-mode switch on "Detachable" and "Portable" chassis-types
	iwlwifi: avoid debug max amsdu config overwriting itself
	nvme: refine the Qemu Identify CNS quirk
	nvme-pci: align io queue count with allocated nvme_queue in nvme_probe
	nvme-tcp: use bh_lock in data_ready
	ath10k: Remove msdu from idr when management pkt send fails
	wcn36xx: Fix error handling path in 'wcn36xx_probe()'
	net: qed*: Reduce RX and TX default ring count when running inside kdump kernel
	drm/mcde: dsi: Fix return value check in mcde_dsi_bind()
	mt76: avoid rx reorder buffer overflow
	md: don't flush workqueue unconditionally in md_open
	raid5: remove gfp flags from scribble_alloc()
	iocost: don't let vrate run wild while there's no saturation signal
	veth: Adjust hard_start offset on redirect XDP frames
	net/mlx5e: IPoIB, Drop multicast packets that this interface sent
	rtlwifi: Fix a double free in _rtl_usb_tx_urb_setup()
	mwifiex: Fix memory corruption in dump_station
	kgdboc: Use a platform device to handle tty drivers showing up late
	x86/boot: Correct relocation destination on old linkers
	sched: Defend cfs and rt bandwidth quota against overflow
	mips: MAAR: Use more precise address mask
	mips: Add udelay lpj numbers adjustment
	crypto: stm32/crc32 - fix ext4 chksum BUG_ON()
	crypto: stm32/crc32 - fix run-time self test issue.
	crypto: stm32/crc32 - fix multi-instance
	drm/amd/powerplay: Disable gfxoff when setting manual mode on picasso and raven
	drm/amdgpu: Sync with VM root BO when switching VM to CPU update mode
	selftests/bpf: CONFIG_IPV6_SEG6_BPF required for test_seg6_loop.o
	x86/mm: Stop printing BRK addresses
	MIPS: tools: Fix resource leak in elf-entry.c
	m68k: mac: Don't call via_flush_cache() on Mac IIfx
	btrfs: improve global reserve stealing logic
	btrfs: qgroup: mark qgroup inconsistent if we're inheriting snapshot to a new qgroup
	macvlan: Skip loopback packets in RX handler
	PCI: Don't disable decoding when mmio_always_on is set
	MIPS: Fix IRQ tracing when call handle_fpe() and handle_msa_fpe()
	bcache: fix refcount underflow in bcache_device_free()
	mmc: sdhci-msm: Set SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 quirk
	staging: greybus: sdio: Respect the cmd->busy_timeout from the mmc core
	mmc: via-sdmmc: Respect the cmd->busy_timeout from the mmc core
	ice: fix potential double free in probe unrolling
	ixgbe: fix signed-integer-overflow warning
	iwlwifi: mvm: fix aux station leak
	mmc: sdhci-esdhc-imx: fix the mask for tuning start point
	spi: dw: Return any value retrieved from the dma_transfer callback
	cpuidle: Fix three reference count leaks
	platform/x86: hp-wmi: Convert simple_strtoul() to kstrtou32()
	platform/x86: intel-hid: Add a quirk to support HP Spectre X2 (2015)
	platform/x86: intel-vbtn: Only blacklist SW_TABLET_MODE on the 9 / "Laptop" chassis-type
	platform/x86: asus_wmi: Reserve more space for struct bias_args
	libbpf: Fix perf_buffer__free() API for sparse allocs
	bpf: Fix map permissions check
	bpf: Refactor sockmap redirect code so its easy to reuse
	bpf: Fix running sk_skb program types with ktls
	selftests/bpf, flow_dissector: Close TAP device FD after the test
	kasan: stop tests being eliminated as dead code with FORTIFY_SOURCE
	string.h: fix incompatibility between FORTIFY_SOURCE and KASAN
	btrfs: free alien device after device add
	btrfs: include non-missing as a qualifier for the latest_bdev
	btrfs: send: emit file capabilities after chown
	btrfs: force chunk allocation if our global rsv is larger than metadata
	btrfs: fix error handling when submitting direct I/O bio
	btrfs: fix wrong file range cleanup after an error filling dealloc range
	btrfs: fix space_info bytes_may_use underflow after nocow buffered write
	btrfs: fix space_info bytes_may_use underflow during space cache writeout
	powerpc/mm: Fix conditions to perform MMU specific management by blocks on PPC32.
	mm: thp: make the THP mapcount atomic against __split_huge_pmd_locked()
	mm: initialize deferred pages with interrupts enabled
	mm/page_alloc.c: call touch_nmi_watchdog() on max order boundaries in deferred init
	mm: call cond_resched() from deferred_init_memmap()
	ima: Fix ima digest hash table key calculation
	ima: Switch to ima_hash_algo for boot aggregate
	ima: Evaluate error in init_ima()
	ima: Directly assign the ima_default_policy pointer to ima_rules
	ima: Call ima_calc_boot_aggregate() in ima_eventdigest_init()
	ima: Remove __init annotation from ima_pcrread()
	evm: Fix possible memory leak in evm_calc_hmac_or_hash()
	ext4: fix EXT_MAX_EXTENT/INDEX to check for zeroed eh_max
	ext4: fix error pointer dereference
	ext4: fix race between ext4_sync_parent() and rename()
	PCI: Avoid Pericom USB controller OHCI/EHCI PME# defect
	PCI: Avoid FLR for AMD Matisse HD Audio & USB 3.0
	PCI: Avoid FLR for AMD Starship USB 3.0
	PCI: Add ACS quirk for Intel Root Complex Integrated Endpoints
	PCI: vmd: Add device id for VMD device 8086:9A0B
	x86/amd_nb: Add Family 19h PCI IDs
	PCI: Add Loongson vendor ID
	serial: 8250_pci: Move Pericom IDs to pci_ids.h
	x86/amd_nb: Add AMD family 17h model 60h PCI IDs
	ima: Remove redundant policy rule set in add_rules()
	ima: Set again build_ima_appraise variable
	PCI: Program MPS for RCiEP devices
	e1000e: Disable TSO for buffer overrun workaround
	e1000e: Relax condition to trigger reset for ME workaround
	carl9170: remove P2P_GO support
	media: go7007: fix a miss of snd_card_free
	media: cedrus: Program output format during each run
	serial: 8250: Avoid error message on reprobe
	Bluetooth: hci_bcm: fix freeing not-requested IRQ
	b43legacy: Fix case where channel status is corrupted
	b43: Fix connection problem with WPA3
	b43_legacy: Fix connection problem with WPA3
	media: ov5640: fix use of destroyed mutex
	clk: mediatek: assign the initial value to clk_init_data of mtk_mux
	igb: Report speed and duplex as unknown when device is runtime suspended
	hwmon: (k10temp) Add AMD family 17h model 60h PCI match
	EDAC/amd64: Add AMD family 17h model 60h PCI IDs
	power: vexpress: set suppress_bind_attrs to true
	power: supply: core: fix HWMON temperature labels
	power: supply: core: fix memory leak in HWMON error path
	pinctrl: samsung: Correct setting of eint wakeup mask on s5pv210
	pinctrl: samsung: Save/restore eint_mask over suspend for EINT_TYPE GPIOs
	gnss: sirf: fix error return code in sirf_probe()
	sparc32: fix register window handling in genregs32_[gs]et()
	sparc64: fix misuses of access_process_vm() in genregs32_[sg]et()
	dm crypt: avoid truncating the logical block size
	alpha: fix memory barriers so that they conform to the specification
	powerpc/fadump: use static allocation for reserved memory ranges
	powerpc/fadump: consider reserved ranges while reserving memory
	powerpc/fadump: Account for memory_limit while reserving memory
	kernel/cpu_pm: Fix uninitialized local in cpu_pm
	ARM: tegra: Correct PL310 Auxiliary Control Register initialization
	soc/tegra: pmc: Select GENERIC_PINCONF
	ARM: dts: exynos: Fix GPIO polarity for the GalaxyS3 CM36651 sensor's bus
	ARM: dts: at91: sama5d2_ptc_ek: fix vbus pin
	ARM: dts: s5pv210: Set keep-power-in-suspend for SDHCI1 on Aries
	drivers/macintosh: Fix memleak in windfarm_pm112 driver
	powerpc/32s: Fix another build failure with CONFIG_PPC_KUAP_DEBUG
	powerpc/kasan: Fix issues by lowering KASAN_SHADOW_END
	powerpc/kasan: Fix shadow pages allocation failure
	powerpc/32: Disable KASAN with pages bigger than 16k
	powerpc/64s: Don't let DT CPU features set FSCR_DSCR
	powerpc/64s: Save FSCR to init_task.thread.fscr after feature init
	kbuild: force to build vmlinux if CONFIG_MODVERSIONS=y
	sunrpc: svcauth_gss_register_pseudoflavor must reject duplicate registrations.
	sunrpc: clean up properly in gss_mech_unregister()
	mtd: rawnand: Fix nand_gpio_waitrdy()
	mtd: rawnand: onfi: Fix redundancy detection check
	mtd: rawnand: brcmnand: fix hamming oob layout
	mtd: rawnand: diskonchip: Fix the probe error path
	mtd: rawnand: sharpsl: Fix the probe error path
	mtd: rawnand: ingenic: Fix the probe error path
	mtd: rawnand: xway: Fix the probe error path
	mtd: rawnand: orion: Fix the probe error path
	mtd: rawnand: socrates: Fix the probe error path
	mtd: rawnand: oxnas: Fix the probe error path
	mtd: rawnand: sunxi: Fix the probe error path
	mtd: rawnand: plat_nand: Fix the probe error path
	mtd: rawnand: pasemi: Fix the probe error path
	mtd: rawnand: mtk: Fix the probe error path
	mtd: rawnand: tmio: Fix the probe error path
	w1: omap-hdq: cleanup to add missing newline for some dev_dbg
	f2fs: fix checkpoint=disable:%u%%
	perf probe: Do not show the skipped events
	perf probe: Fix to check blacklist address correctly
	perf probe: Check address correctness by map instead of _etext
	perf symbols: Fix debuginfo search for Ubuntu
	perf symbols: Fix kernel maps for kcore and eBPF
	Linux 5.4.48

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I9954fb3f08956419e8586bcb9078e604df207fb9

@@ -16,6 +16,9 @@ Required properties:
Documentation/devicetree/bindings/graph.txt. This port should be connected
to the input port of an attached HDMI or LVDS encoder chip.
Optional properties:
- pinctrl-names: Contain "default" and "sleep".
Example:
dpi0: dpi@1401d000 {
@@ -26,6 +29,9 @@ dpi0: dpi@1401d000 {
<&mmsys CLK_MM_DPI_ENGINE>,
<&apmixedsys CLK_APMIXED_TVDPLL>;
clock-names = "pixel", "engine", "pll";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&dpi_pin_func>;
pinctrl-1 = <&dpi_pin_idle>;
port {
dpi0_out: endpoint {

@@ -4444,9 +4444,11 @@ EOI was received.
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
__u32 pad1;
union {
struct {
__u32 msr;
__u32 pad2;
__u64 control;
__u64 evt_page;
__u64 msg_page;
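
The pad1/pad2 fields added above are there because a __u64 that follows a lone __u32 lands at offset 4 on 32-bit userspace but offset 8 on 64-bit, so the unpadded struct has two different ABIs. A stand-alone C sketch of the effect (struct names are illustrative, not the kernel's):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Without explicit padding, 'control' sits at offset 4 on i386
 * (uint64_t is 4-byte aligned there) but at offset 8 on x86-64. */
struct hv_exit_unpadded {
	uint32_t type;
	uint64_t control;
};

/* With an explicit pad, the layout is the same everywhere. */
struct hv_exit_padded {
	uint32_t type;
	uint32_t pad1;
	uint64_t control;
};

int main(void)
{
	printf("unpadded: control at %zu, size %zu\n",
	       offsetof(struct hv_exit_unpadded, control),
	       sizeof(struct hv_exit_unpadded));
	printf("padded:   control at %zu, size %zu\n",
	       offsetof(struct hv_exit_padded, control),
	       sizeof(struct hv_exit_padded));
	return 0;
}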

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 47
SUBLEVEL = 48
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@@ -591,12 +591,8 @@ KBUILD_MODULES :=
KBUILD_BUILTIN := 1
# If we have only "make modules", don't compile built-in objects.
# When we're building modules with modversions, we need to consider
# the built-in objects during the descend as well, in order to
# make sure the checksums are up to date before we record them.
ifeq ($(MAKECMDGOALS),modules)
KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
KBUILD_BUILTIN :=
endif
# If we have "make <whatever> modules", compile modules
@@ -1349,6 +1345,13 @@ ifdef CONFIG_MODULES
all: modules
# When we're building modules with modversions, we need to consider
# the built-in objects during the descend as well, in order to
# make sure the checksums are up to date before we record them.
ifdef CONFIG_MODVERSIONS
KBUILD_BUILTIN := 1
endif
# Build modules
#
# A module can be listed more than once in obj-m resulting in

@@ -322,14 +322,18 @@ static inline int __is_mmio(const volatile void __iomem *addr)
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
extern inline unsigned int ioread16(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
@@ -370,7 +374,9 @@ extern inline void outw(u16 b, unsigned long port)
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -415,14 +421,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
extern inline u8 readb(const volatile void __iomem *addr)
{
u8 ret = __raw_readb(addr);
u8 ret;
mb();
ret = __raw_readb(addr);
mb();
return ret;
}
extern inline u16 readw(const volatile void __iomem *addr)
{
u16 ret = __raw_readw(addr);
u16 ret;
mb();
ret = __raw_readw(addr);
mb();
return ret;
}
@@ -463,14 +473,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
extern inline u32 readl(const volatile void __iomem *addr)
{
u32 ret = __raw_readl(addr);
u32 ret;
mb();
ret = __raw_readl(addr);
mb();
return ret;
}
extern inline u64 readq(const volatile void __iomem *addr)
{
u64 ret = __raw_readq(addr);
u64 ret;
mb();
ret = __raw_readq(addr);
mb();
return ret;
}
@@ -499,14 +513,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define outb_p outb
#define outw_p outw
#define outl_p outl
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)
#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
#define writew_relaxed(b, addr) __raw_writew(b, addr)
#define writel_relaxed(b, addr) __raw_writel(b, addr)
#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
extern u8 readb_relaxed(const volatile void __iomem *addr);
extern u16 readw_relaxed(const volatile void __iomem *addr);
extern u32 readl_relaxed(const volatile void __iomem *addr);
extern u64 readq_relaxed(const volatile void __iomem *addr);
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readb(addr);
}
extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readw(addr);
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readl(addr);
}
extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readq(addr);
}
#endif
#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq
/*
* String version of IO memory access ops:

@@ -16,21 +16,27 @@
unsigned int
ioread8(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
unsigned int ioread16(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
unsigned int ioread32(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq);
u8 readb(const volatile void __iomem *addr)
{
u8 ret = __raw_readb(addr);
u8 ret;
mb();
ret = __raw_readb(addr);
mb();
return ret;
}
u16 readw(const volatile void __iomem *addr)
{
u16 ret = __raw_readw(addr);
u16 ret;
mb();
ret = __raw_readw(addr);
mb();
return ret;
}
u32 readl(const volatile void __iomem *addr)
{
u32 ret = __raw_readl(addr);
u32 ret;
mb();
ret = __raw_readl(addr);
mb();
return ret;
}
u64 readq(const volatile void __iomem *addr)
{
u64 ret = __raw_readq(addr);
u64 ret;
mb();
ret = __raw_readq(addr);
mb();
return ret;
}
@@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);
/*
* The _relaxed functions must be ordered w.r.t. each other, but they don't
* have to be ordered w.r.t. other memory accesses.
*/
u8 readb_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readb(addr);
}
u16 readw_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readw(addr);
}
u32 readl_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readl(addr);
}
u64 readq_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readq(addr);
}
EXPORT_SYMBOL(readb_relaxed);
EXPORT_SYMBOL(readw_relaxed);
EXPORT_SYMBOL(readl_relaxed);
EXPORT_SYMBOL(readq_relaxed);
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
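
The alpha changes above move the mb() to before each MMIO load so that readX()/ioreadX() are ordered against earlier writes, as Documentation/memory-barriers.txt requires; only the _relaxed variants keep the weaker ordering. A hypothetical driver fragment showing what the stronger readl() now guarantees (the device and register offsets are invented):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative register offsets, not from any real device. */
#define DEMO_REG_DOORBELL	0x00
#define DEMO_REG_STATUS		0x04

static u32 demo_ring_and_check(void __iomem *regs)
{
	/* Post a doorbell write to the device. */
	writel(1, regs + DEMO_REG_DOORBELL);

	/*
	 * With the fix, readl() on alpha issues mb() before the load,
	 * so this status read cannot be reordered ahead of the
	 * doorbell write. readl_relaxed() would not give that ordering.
	 */
	return readl(regs + DEMO_REG_STATUS);
}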

@@ -40,7 +40,7 @@
ahb {
usb0: gadget@300000 {
atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>;
atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usba_vbus>;
status = "okay";

@@ -68,7 +68,7 @@
i2c_cm36651: i2c-gpio-2 {
compatible = "i2c-gpio";
gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>;
gpios = <&gpf0 0 GPIO_ACTIVE_HIGH>, <&gpf0 1 GPIO_ACTIVE_HIGH>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;

@@ -454,6 +454,7 @@
pinctrl-names = "default";
cap-sd-highspeed;
cap-mmc-highspeed;
keep-power-in-suspend;
mmc-pwrseq = <&wifi_pwrseq>;
non-removable;

@@ -106,8 +106,8 @@ static const char * const tegra_dt_board_compat[] = {
};
DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
.l2c_aux_val = 0x3c400001,
.l2c_aux_mask = 0xc20fc3fe,
.l2c_aux_val = 0x3c400000,
.l2c_aux_mask = 0xc20fc3ff,
.smp = smp_ops(tegra_smp_ops),
.map_io = tegra_map_common_io,
.init_early = tegra_init_early,

@@ -5,6 +5,7 @@
* VMA_VM_FLAGS
* VM_EXEC
*/
#include <linux/const.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -30,7 +31,7 @@
* act_mm - get current->active_mm
*/
.macro act_mm, rd
bic \rd, sp, #8128
bic \rd, sp, #(THREAD_SIZE - 1) & ~63
bic \rd, \rd, #63
ldr \rd, [\rd, #TI_TASK]
.if (TSK_ACTIVE_MM > IMM12_MASK)

@@ -79,7 +79,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
* IPI all online CPUs so that they undergo a context synchronization
* event and are forced to refetch the new instructions.
*/
#ifdef CONFIG_KGDB
/*
* KGDB performs cache maintenance with interrupts disabled, so we
* will deadlock trying to IPI the secondary CPUs. In theory, we can
@@ -89,9 +89,9 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
* the patching operation, so we don't need extra IPIs here anyway.
* In which case, add a KGDB-specific bodge and return early.
*/
if (kgdb_connected && irqs_disabled())
if (in_dbg_master())
return;
#endif
kick_all_cpus_sync();
}

@@ -456,6 +456,7 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

@@ -394,13 +394,19 @@ __create_page_tables:
/*
* Since the page tables have been populated with non-cacheable
* accesses (MMU disabled), invalidate the idmap and swapper page
* tables again to remove any speculatively loaded cache lines.
* accesses (MMU disabled), invalidate those tables again to
* remove any speculatively loaded cache lines.
*/
dmb sy
adrp x0, idmap_pg_dir
adrp x1, idmap_pg_end
sub x1, x1, x0
bl __inval_dcache_area
adrp x0, init_pg_dir
adrp x1, init_pg_end
sub x1, x1, x0
dmb sy
bl __inval_dcache_area
ret x28

@@ -1508,16 +1508,10 @@ static u32 aarch64_encode_immediate(u64 imm,
u32 insn)
{
unsigned int immr, imms, n, ones, ror, esz, tmp;
u64 mask = ~0UL;
/* Can't encode full zeroes or full ones */
if (!imm || !~imm)
return AARCH64_BREAK_FAULT;
u64 mask;
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
if (upper_32_bits(imm))
return AARCH64_BREAK_FAULT;
esz = 32;
break;
case AARCH64_INSN_VARIANT_64BIT:
@@ -1529,6 +1523,12 @@ static u32 aarch64_encode_immediate(u64 imm,
return AARCH64_BREAK_FAULT;
}
mask = GENMASK(esz - 1, 0);
/* Can't encode full zeroes, full ones, or value wider than the mask */
if (!imm || imm == mask || imm & ~mask)
return AARCH64_BREAK_FAULT;
/*
* Inverse of Replicate(). Try to spot a repeating pattern
* with a pow2 stride.
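
The reworked bounds check above rejects three cases in one place: all zeroes, all ones within the element size, and any value wider than the element size. The old code only rejected 64-bit all-ones, so 0xffffffff slipped past the 32-bit path even though it has no 32-bit logical-immediate encoding. A user-space sketch of just that range check (the pattern search that follows it is unchanged):

#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's GENMASK(esz - 1, 0), for esz <= 64. */
static uint64_t genmask(unsigned int esz)
{
	return esz == 64 ? ~0ULL : (1ULL << esz) - 1;
}

static int in_encodable_range(uint64_t imm, unsigned int esz)
{
	uint64_t mask = genmask(esz);

	/* Can't encode full zeroes, full ones, or a value wider than the mask. */
	return !(imm == 0 || imm == mask || (imm & ~mask));
}

int main(void)
{
	printf("%d\n", in_encodable_range(0xffffffffULL, 32));  /* 0: was wrongly accepted */
	printf("%d\n", in_encodable_range(0x00ff00ffULL, 32));  /* 1 */
	printf("%d\n", in_encodable_range(0x1ffffffffULL, 32)); /* 0: wider than 32 bits */
	return 0;
}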

@@ -142,6 +142,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
idmap_pg_end = .;
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
tramp_pg_dir = .;

@@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping;
struct irq_desc;
extern void via_l2_flush(int writeback);
extern void via_register_interrupts(void);
extern void via_irq_enable(int);
extern void via_irq_disable(int);

@@ -59,7 +59,6 @@ extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
extern void via_init_clock(irq_handler_t func);
extern void via_flush_cache(void);
extern void oss_init(void);
extern void psc_init(void);
extern void baboon_init(void);
@@ -130,21 +129,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record)
return unknown;
}
/*
* Flip into 24bit mode for an instant - flushes the L2 cache card. We
* have to disable interrupts for this. Our IRQ handlers will crap
* themselves if they take an IRQ in 24bit mode!
*/
static void mac_cache_card_flush(int writeback)
{
unsigned long flags;
local_irq_save(flags);
via_flush_cache();
local_irq_restore(flags);
}
void __init config_mac(void)
{
if (!MACH_IS_MAC)
@@ -175,9 +159,8 @@ void __init config_mac(void)
* not.
*/
if (macintosh_config->ident == MAC_MODEL_IICI
|| macintosh_config->ident == MAC_MODEL_IIFX)
mach_l2_flush = mac_cache_card_flush;
if (macintosh_config->ident == MAC_MODEL_IICI)
mach_l2_flush = via_l2_flush;
}

@@ -294,10 +294,14 @@ void via_debug_dump(void)
* the system into 24-bit mode for an instant.
*/
void via_flush_cache(void)
void via_l2_flush(int writeback)
{
unsigned long flags;
local_irq_save(flags);
via2[gBufB] &= ~VIA2B_vMode32;
via2[gBufB] |= VIA2B_vMode32;
local_irq_restore(flags);
}
/*

@@ -285,12 +285,23 @@ ifdef CONFIG_64BIT
endif
endif
# When linking a 32-bit executable the LLVM linker cannot cope with a
# 32-bit load address that has been sign-extended to 64 bits. Simply
# remove the upper 32 bits then, as it is safe to do so with other
# linkers.
ifdef CONFIG_64BIT
load-ld = $(load-y)
else
load-ld = $(subst 0xffffffff,0x,$(load-y))
endif
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld)
KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
LINKER_LOAD_ADDRESS=$(load-ld) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
PLATFORM="$(platform-y)" \
ITS_INPUTS="$(its-y)"

@@ -90,7 +90,7 @@ ifneq ($(zload-y),)
VMLINUZ_LOAD_ADDRESS := $(zload-y)
else
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
$(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS))
endif
UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)

@@ -231,7 +231,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
CONFIG_DRM_RADEON=m
CONFIG_FB_RADEON=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m

@@ -288,10 +288,12 @@
# define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6)
#endif
#ifndef cpu_has_mips64r1
# define cpu_has_mips64r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1)
# define cpu_has_mips64r1 (cpu_has_64bits && \
__isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1))
#endif
#ifndef cpu_has_mips64r2
# define cpu_has_mips64r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2)
# define cpu_has_mips64r2 (cpu_has_64bits && \
__isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2))
#endif
#ifndef cpu_has_mips64r6
# define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6)

@@ -750,7 +750,7 @@
/* MAAR bit definitions */
#define MIPS_MAAR_VH (_U64CAST_(1) << 63)
#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12)
#define MIPS_MAAR_ADDR_SHIFT 12
#define MIPS_MAAR_S (_ULCAST_(1) << 1)
#define MIPS_MAAR_VL (_ULCAST_(1) << 0)
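
The MIPS_MAAR_ADDR change pins the address field to bits 55:12 instead of deriving it from BITS_PER_LONG, which made the mask bits 31:12 on a 32-bit kernel and bits 63:12 on a 64-bit one. A quick arithmetic sketch (GENMASK_ULL re-implemented for user space):

#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Old formula, evaluated for BITS_PER_LONG == 32 and 64. */
	uint64_t old32 = ((1ULL << (32 - 12)) - 1) << 12;
	uint64_t old64 = ((1ULL << (64 - 12)) - 1) << 12;
	/* New fixed-width mask: bits 55:12. */
	uint64_t new_mask = GENMASK_ULL(55, 12);

	printf("old, 32-bit kernel: 0x%016llx\n", (unsigned long long)old32);
	printf("old, 64-bit kernel: 0x%016llx\n", (unsigned long long)old64);
	printf("new:                0x%016llx\n", (unsigned long long)new_mask);
	return 0;
}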

@@ -474,20 +474,20 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro __build_clear_fpe
CLI
TRACE_IRQS_OFF
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
.set pop
CLI
TRACE_IRQS_OFF
.endm
.macro __build_clear_msa_fpe
_cfcmsa a1, MSA_CSR
CLI
TRACE_IRQS_OFF
_cfcmsa a1, MSA_CSR
.endm
.macro __build_clear_ade

@@ -119,9 +119,9 @@ static char *cm2_causes[32] = {
"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
"0x08", "0x09", "0x0a", "0x0b",
"0x0c", "0x0d", "0x0e", "0x0f",
"0x10", "0x11", "0x12", "0x13",
"0x14", "0x15", "0x16", "INTVN_WR_ERR",
"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
"0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
"0x14", "0x15", "0x16", "0x17",
"0x18", "0x19", "0x1a", "0x1b",
"0x1c", "0x1d", "0x1e", "0x1f"
};

@@ -653,7 +653,17 @@ static void __init arch_mem_init(char **cmdline_p)
crashk_res.end - crashk_res.start + 1);
#endif
device_tree_init();
/*
* In order to reduce the possibility of kernel panic when failed to
* get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
* low memory as small as possible before plat_swiotlb_setup(), so
* make sparse_init() using top-down allocation.
*/
memblock_set_bottom_up(false);
sparse_init();
memblock_set_bottom_up(true);
plat_swiotlb_setup();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

@@ -18,12 +18,82 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>
#ifdef CONFIG_CPU_FREQ
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
static unsigned long glb_lpj_ref;
static unsigned long glb_lpj_ref_freq;
static int cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
struct cpumask *cpus = freq->policy->cpus;
unsigned long lpj;
int cpu;
/*
* Skip lpj numbers adjustment if the CPU-freq transition is safe for
* the loops delay. (Is this possible?)
*/
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
/* Save the initial values of the lpjes for future scaling. */
if (!glb_lpj_ref) {
glb_lpj_ref = boot_cpu_data.udelay_val;
glb_lpj_ref_freq = freq->old;
for_each_online_cpu(cpu) {
per_cpu(pcp_lpj_ref, cpu) =
cpu_data[cpu].udelay_val;
per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
}
}
/*
* Adjust global lpj variable and per-CPU udelay_val number in
* accordance with the new CPU frequency.
*/
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
glb_lpj_ref_freq,
freq->new);
for_each_cpu(cpu, cpus) {
lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
per_cpu(pcp_lpj_ref_freq, cpu),
freq->new);
cpu_data[cpu].udelay_val = (unsigned int)lpj;
}
}
return NOTIFY_OK;
}
static struct notifier_block cpufreq_notifier = {
.notifier_call = cpufreq_callback,
};
static int __init register_cpufreq_notifier(void)
{
return cpufreq_register_notifier(&cpufreq_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);
#endif /* CONFIG_CPU_FREQ */
/*
* forward reference
*/

@@ -50,7 +50,7 @@ SECTIONS
/* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
. = VMLINUX_LOAD_ADDRESS;
. = LINKER_LOAD_ADDRESS;
/* read-only */
_text = .; /* Text and read-only data */
.text : {

@@ -51,11 +51,14 @@ int main(int argc, const char *argv[])
nread = fread(&hdr, 1, sizeof(hdr), file);
if (nread != sizeof(hdr)) {
perror("Unable to read input file");
fclose(file);
return EXIT_FAILURE;
}
if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG))
if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) {
fclose(file);
die("Input is not an ELF\n");
}
switch (hdr.ehdr32.e_ident[EI_CLASS]) {
case ELFCLASS32:
@@ -67,6 +70,7 @@ int main(int argc, const char *argv[])
entry = be32toh(hdr.ehdr32.e_entry);
break;
default:
fclose(file);
die("Invalid ELF encoding\n");
}
@@ -83,14 +87,17 @@ int main(int argc, const char *argv[])
entry = be64toh(hdr.ehdr64.e_entry);
break;
default:
fclose(file);
die("Invalid ELF encoding\n");
}
break;
default:
fclose(file);
die("Invalid ELF class\n");
}
printf("0x%016" PRIx64 "\n", entry);
fclose(file);
return EXIT_SUCCESS;
}

@@ -171,7 +171,7 @@ config PPC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if PPC32
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT

@@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H
#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#ifdef __ASSEMBLY__
@@ -75,7 +76,7 @@
.macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
lwz \gpr, KUAP(thread)
lwz \gpr, THREAD + KUAP(\current)
999: twnei \gpr, 0
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif

@@ -64,12 +64,14 @@ struct fadump_memory_range {
};
/* fadump memory ranges info */
#define RNG_NAME_SZ 16
struct fadump_mrange_info {
char name[16];
char name[RNG_NAME_SZ];
struct fadump_memory_range *mem_ranges;
u32 mem_ranges_sz;
u32 mem_range_cnt;
u32 max_mem_ranges;
bool is_static;
};
/* Platform specific callback functions */

@@ -23,17 +23,13 @@
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
#define KASAN_SHADOW_END 0UL
#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))
#ifdef CONFIG_KASAN
void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
#else
static inline void kasan_init(void) { }
static inline void kasan_mmu_init(void) { }
#endif
#endif /* __ASSEMBLY */

@@ -346,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
u64 lpcr;
/*
* Linux relies on FSCR[DSCR] being clear, so that we can take the
* facility unavailable interrupt and track the task's usage of DSCR.
* See facility_unavailable_exception().
* Clear the bit here so that feat_enable() doesn't set it.
*/
f->fscr_bit_nr = -1;
feat_enable(f);
lpcr = mfspr(SPRN_LPCR);

@@ -38,8 +38,17 @@ static void __init fadump_reserve_crash_area(u64 base);
#ifndef CONFIG_PRESERVE_FA_DUMP
static DEFINE_MUTEX(fadump_mutex);
struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 };
struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
RESERVED_RNGS_SZ, 0,
RESERVED_RNGS_CNT, true };
static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
#ifdef CONFIG_CMA
static struct cma *fadump_cma;
@@ -108,6 +117,11 @@ static int __init fadump_cma_init(void) { return 1; }
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
int depth, void *data)
{
if (depth == 0) {
early_init_dt_scan_reserved_ranges(node);
return 0;
}
if (depth != 1)
return 0;
@@ -429,10 +443,72 @@ static int __init fadump_get_boot_mem_regions(void)
return ret;
}
/*
* Returns true, if the given range overlaps with reserved memory ranges
* starting at idx. Also, updates idx to index of overlapping memory range
* with the given memory range.
* False, otherwise.
*/
static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
bool ret = false;
int i;
for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
u64 rbase = reserved_mrange_info.mem_ranges[i].base;
u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
if (end <= rbase)
break;
if ((end > rbase) && (base < rend)) {
*idx = i;
ret = true;
break;
}
}
return ret;
}
/*
* Locate a suitable memory area to reserve memory for FADump. While at it,
* lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
*/
static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
{
struct fadump_memory_range *mrngs;
phys_addr_t mstart, mend;
int idx = 0;
u64 i, ret = 0;
mrngs = reserved_mrange_info.mem_ranges;
for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
&mstart, &mend, NULL) {
pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
i, mstart, mend, base);
if (mstart > base)
base = PAGE_ALIGN(mstart);
while ((mend > base) && ((mend - base) >= size)) {
if (!overlaps_reserved_ranges(base, base+size, &idx)) {
ret = base;
goto out;
}
base = mrngs[idx].base + mrngs[idx].size;
base = PAGE_ALIGN(base);
}
}
out:
return ret;
}
int __init fadump_reserve_mem(void)
{
u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE;
bool is_memblock_bottom_up = memblock_bottom_up();
u64 base, size, mem_boundary, bootmem_min;
int ret = 1;
if (!fw_dump.fadump_enabled)
@@ -453,9 +529,9 @@ int __init fadump_reserve_mem(void)
PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
if (!fw_dump.nocma) {
align = FADUMP_CMA_ALIGNMENT;
fw_dump.boot_memory_size =
ALIGN(fw_dump.boot_memory_size, align);
ALIGN(fw_dump.boot_memory_size,
FADUMP_CMA_ALIGNMENT);
}
#endif
@@ -523,13 +599,9 @@ int __init fadump_reserve_mem(void)
* Reserve memory at an offset closer to bottom of the RAM to
* minimize the impact of memory hot-remove operation.
*/
memblock_set_bottom_up(true);
base = memblock_find_in_range(base, mem_boundary, size, align);
base = fadump_locate_reserve_mem(base, size);
/* Restore the previous allocation mode */
memblock_set_bottom_up(is_memblock_bottom_up);
if (!base) {
if (!base || (base + size > mem_boundary)) {
pr_err("Failed to find memory chunk for reservation!\n");
goto error_out;
}
@@ -726,10 +798,14 @@ void fadump_free_cpu_notes_buf(void)
static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
{
if (mrange_info->is_static) {
mrange_info->mem_range_cnt = 0;
return;
}
kfree(mrange_info->mem_ranges);
mrange_info->mem_ranges = NULL;
mrange_info->mem_ranges_sz = 0;
mrange_info->max_mem_ranges = 0;
memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
(sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
}
/*
@@ -786,6 +862,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
int ret;
if (mrange_info->is_static) {
pr_err("Reached array size limit for %s memory ranges\n",
mrange_info->name);
return -ENOSPC;
}
ret = fadump_alloc_mem_ranges(mrange_info);
if (ret)
return ret;
@@ -1202,20 +1284,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
* Scan reserved-ranges to consider them while reserving/releasing
* memory for FADump.
*/
static inline int fadump_scan_reserved_mem_ranges(void)
static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
struct device_node *root;
const __be32 *prop;
int len, ret = -1;
unsigned long i;
root = of_find_node_by_path("/");
if (!root)
return ret;
/* reserved-ranges already scanned */
if (reserved_mrange_info.mem_range_cnt != 0)
return;
prop = of_get_property(root, "reserved-ranges", &len);
prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
if (!prop)
return ret;
return;
/*
* Each reserved range is an (address,size) pair, 2 cells each,
@@ -1237,7 +1318,8 @@ static inline int fadump_scan_reserved_mem_ranges(void)
}
}
return ret;
/* Compact reserved ranges */
sort_and_merge_mem_ranges(&reserved_mrange_info);
}
/*
@@ -1251,32 +1333,21 @@ static void fadump_release_memory(u64 begin, u64 end)
u64 ra_start, ra_end, tstart;
int i, ret;
fadump_scan_reserved_mem_ranges();
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
/*
* Add reserved dump area to reserved ranges list
* and exclude all these ranges while releasing memory.
* If reserved ranges array limit is hit, overwrite the last reserved
* memory range with reserved dump area to ensure it is excluded from
* the memory being released (reused for next FADump registration).
*/
ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
if (ret != 0) {
/*
* Not enough memory to setup reserved ranges but the system is
* running shortage of memory. So, release all the memory except
* Reserved dump area (reused for next fadump registration).
*/
if (begin < ra_end && end > ra_start) {
if (begin < ra_start)
fadump_release_reserved_area(begin, ra_start);
if (end > ra_end)
fadump_release_reserved_area(ra_end, end);
} else
fadump_release_reserved_area(begin, end);
if (reserved_mrange_info.mem_range_cnt ==
reserved_mrange_info.max_mem_ranges)
reserved_mrange_info.mem_range_cnt--;
ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
if (ret != 0)
return;
}
/* Get the reserved ranges list in order first. */
sort_and_merge_mem_ranges(&reserved_mrange_info);
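
overlaps_reserved_ranges() above is the standard half-open interval test: [base, end) and [rbase, rend) intersect exactly when end > rbase && base < rend, and because the list has been sorted, the walk can resume from *idx and bail out as soon as end <= rbase. A self-contained sketch of the predicate:

#include <stdint.h>
#include <stdio.h>

/* Half-open interval overlap, as used by overlaps_reserved_ranges(). */
static int ranges_overlap(uint64_t base, uint64_t end,
			  uint64_t rbase, uint64_t rend)
{
	return end > rbase && base < rend;
}

int main(void)
{
	/* Touching end-to-start is not an overlap for half-open ranges. */
	printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2000, 0x3000)); /* 0 */
	/* Partial intersection is. */
	printf("%d\n", ranges_overlap(0x1000, 0x2800, 0x2000, 0x3000)); /* 1 */
	/* So is full containment. */
	printf("%d\n", ranges_overlap(0x2100, 0x2200, 0x2000, 0x3000)); /* 1 */
	return 0;
}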

@@ -685,6 +685,23 @@ static void __init tm_init(void)
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC64
static void __init save_fscr_to_task(void)
{
/*
* Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
* have configured via the device tree features or via __init_FSCR().
* That value will then be propagated to pid 1 (init) and all future
* processes.
*/
if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
static inline void save_fscr_to_task(void) {};
#endif
void __init early_init_devtree(void *params)
{
phys_addr_t limit;
@@ -773,6 +790,8 @@ void __init early_init_devtree(void *params)
BUG();
}
save_fscr_to_task();
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
/* We'll later wait for secondaries to check in; there are
* NCPUS-1 non-boot CPUs :-)

@@ -175,8 +175,6 @@ void __init MMU_init(void)
btext_unmap();
#endif
kasan_mmu_init();
setup_kup();
/* Shortly after that, the entire linear mapping will be available */

@@ -129,7 +129,7 @@ static void __init kasan_remap_early_shadow_ro(void)
flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}
void __init kasan_mmu_init(void)
static void __init kasan_mmu_init(void)
{
int ret;
struct memblock_region *reg;
@@ -156,6 +156,8 @@ void __init kasan_mmu_init(void)
void __init kasan_init(void)
{
kasan_mmu_init();
kasan_remap_early_shadow_ro();
clear_page(kasan_early_shadow_page);

@@ -207,7 +207,7 @@ void mark_initmem_nx(void)
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
if (v_block_mapped((unsigned long)_stext + 1))
if (v_block_mapped((unsigned long)_sinittext))
mmu_mark_initmem_nx();
else
change_page_attr(page, numpages, PAGE_KERNEL);
@@ -219,7 +219,7 @@ void mark_rodata_ro(void)
struct page *page;
unsigned long numpages;
if (v_block_mapped((unsigned long)_sinittext)) {
if (v_block_mapped((unsigned long)_stext + 1)) {
mmu_mark_rodata_ro();
ptdump_check_wx();
return;

@@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
int ret;
struct spu_context *ctx = file->private_data;
u32 stat, data;
int ret;
if (!access_ok(buf, len))
return -EFAULT;
@@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_mbox_info_read(ctx, buf, len, pos);
stat = ctx->csa.prob.mb_stat_R;
data = ctx->csa.prob.pu_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
/* EOF if there's no entry in the mbox */
if (!(stat & 0x0000ff))
return 0;
return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_mbox_info_fops = {
@@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
u32 stat, data;
int ret;
if (!access_ok(buf, len))
@@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_ibox_info_read(ctx, buf, len, pos);
stat = ctx->csa.prob.mb_stat_R;
data = ctx->csa.priv2.puint_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
/* EOF if there's no entry in the ibox */
if (!(stat & 0xff0000))
return 0;
return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_ibox_info_fops = {
@@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = {
.llseek = generic_file_llseek,
};
static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
{
return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
}
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
@@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
u32 wbox_stat;
wbox_stat = ctx->csa.prob.mb_stat_R;
cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
cnt = spufs_wbox_info_cnt(ctx);
for (i = 0; i < cnt; i++) {
data[i] = ctx->csa.spu_mailbox_data[i];
}
@@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
int ret;
u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
int ret, count;
if (!access_ok(buf, len))
return -EFAULT;
@@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_wbox_info_read(ctx, buf, len, pos);
count = spufs_wbox_info_cnt(ctx);
memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
return simple_read_from_buffer(buf, len, pos, &data,
count * sizeof(u32));
}
static const struct file_operations spufs_wbox_info_fops = {
@@ -2084,27 +2104,33 @@ static const struct file_operations spufs_wbox_info_fops = {
.llseek = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
static void spufs_get_dma_info(struct spu_context *ctx,
struct spu_dma_info *info)
{
struct spu_dma_info info;
struct mfc_cq_sr *qp, *spuqp;
int i;
info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
for (i = 0; i < 16; i++) {
qp = &info.dma_info_command_data[i];
spuqp = &ctx->csa.priv2.spuq[i];
struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
}
}
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
struct spu_dma_info info;
spufs_get_dma_info(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
struct spu_dma_info info;
int ret;
if (!access_ok(buf, len))
@@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_dma_info_read(ctx, buf, len, pos);
spufs_get_dma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
return simple_read_from_buffer(buf, len, pos, &info,
sizeof(info));
}
static const struct file_operations spufs_dma_info_fops = {
@@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = {
.llseek = no_llseek,
};
static void spufs_get_proxydma_info(struct spu_context *ctx,
struct spu_proxydma_info *info)
{
int i;
info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
for (i = 0; i < 8; i++) {
struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
}
}
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
struct spu_proxydma_info info;
struct mfc_cq_sr *qp, *puqp;
int ret = sizeof info;
int i;
if (len < ret)
return -EINVAL;
@@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
if (!access_ok(buf, len))
return -EFAULT;
info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
for (i = 0; i < 8; i++) {
qp = &info.proxydma_info_command_data[i];
puqp = &ctx->csa.priv2.puq[i];
qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
}
spufs_get_proxydma_info(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
struct spu_proxydma_info info;
int ret;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
spufs_get_proxydma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
return simple_read_from_buffer(buf, len, pos, &info,
sizeof(info));
}
static const struct file_operations spufs_proxydma_info_fops = {

@@ -167,7 +167,6 @@ static void pnv_smp_cpu_kill_self(void)
/* Standard hot unplug procedure */
idle_task_exit();
current->active_mm = NULL; /* for sanity */
cpu = smp_processor_id();
DBG("CPU%d offline\n", cpu);
generic_set_cpu_dead(cpu);

@@ -46,82 +46,79 @@ enum sparc_regset {
REGSET_FP,
};
static int regwindow32_get(struct task_struct *target,
const struct pt_regs *regs,
u32 *uregs)
{
unsigned long reg_window = regs->u_regs[UREG_I6];
int size = 16 * sizeof(u32);
if (target == current) {
if (copy_from_user(uregs, (void __user *)reg_window, size))
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
FOLL_FORCE) != size)
return -EFAULT;
}
return 0;
}
static int regwindow32_set(struct task_struct *target,
const struct pt_regs *regs,
u32 *uregs)
{
unsigned long reg_window = regs->u_regs[UREG_I6];
int size = 16 * sizeof(u32);
if (target == current) {
if (copy_to_user((void __user *)reg_window, uregs, size))
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
FOLL_FORCE | FOLL_WRITE) != size)
return -EFAULT;
}
return 0;
}
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = target->thread.kregs;
unsigned long __user *reg_window;
unsigned long *k = kbuf;
unsigned long __user *u = ubuf;
unsigned long reg;
u32 uregs[16];
int ret;
if (target == current)
flush_user_windows();
pos /= sizeof(reg);
count /= sizeof(reg);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->u_regs,
0, 16 * sizeof(u32));
if (ret || !count)
return ret;
if (kbuf) {
for (; count > 0 && pos < 16; count--)
*k++ = regs->u_regs[pos++];
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
return -EFAULT;
}
} else {
for (; count > 0 && pos < 16; count--) {
if (put_user(regs->u_regs[pos++], u++))
return -EFAULT;
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
put_user(reg, u++))
return -EFAULT;
}
}
while (count > 0) {
switch (pos) {
case 32: /* PSR */
reg = regs->psr;
break;
case 33: /* PC */
reg = regs->pc;
break;
case 34: /* NPC */
reg = regs->npc;
break;
case 35: /* Y */
reg = regs->y;
break;
case 36: /* WIM */
case 37: /* TBR */
reg = 0;
break;
default:
goto finish;
}
if (kbuf)
*k++ = reg;
else if (put_user(reg, u++))
if (pos < 32 * sizeof(u32)) {
if (regwindow32_get(target, regs, uregs))
return -EFAULT;
pos++;
count--;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
uregs,
16 * sizeof(u32), 32 * sizeof(u32));
if (ret || !count)
return ret;
}
finish:
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
38 * sizeof(reg), -1);
uregs[0] = regs->psr;
uregs[1] = regs->pc;
uregs[2] = regs->npc;
uregs[3] = regs->y;
uregs[4] = 0; /* WIM */
uregs[5] = 0; /* TBR */
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
uregs,
32 * sizeof(u32), 38 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
@@ -130,82 +127,53 @@ static int genregs32_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
unsigned long __user *reg_window;
const unsigned long *k = kbuf;
const unsigned long __user *u = ubuf;
unsigned long reg;
u32 uregs[16];
u32 psr;
int ret;
if (target == current)
flush_user_windows();
pos /= sizeof(reg);
count /= sizeof(reg);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->u_regs,
0, 16 * sizeof(u32));
if (ret || !count)
return ret;
if (kbuf) {
for (; count > 0 && pos < 16; count--)
regs->u_regs[pos++] = *k++;
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
return -EFAULT;
}
} else {
for (; count > 0 && pos < 16; count--) {
if (get_user(reg, u++))
return -EFAULT;
regs->u_regs[pos++] = reg;
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
put_user(reg, &reg_window[pos++]))
return -EFAULT;
}
}
while (count > 0) {
unsigned long psr;
if (kbuf)
reg = *k++;
else if (get_user(reg, u++))
if (pos < 32 * sizeof(u32)) {
if (regwindow32_get(target, regs, uregs))
return -EFAULT;
switch (pos) {
case 32: /* PSR */
psr = regs->psr;
psr &= ~(PSR_ICC | PSR_SYSCALL);
psr |= (reg & (PSR_ICC | PSR_SYSCALL));
regs->psr = psr;
break;
case 33: /* PC */
regs->pc = reg;
break;
case 34: /* NPC */
regs->npc = reg;
break;
case 35: /* Y */
regs->y = reg;
break;
case 36: /* WIM */
case 37: /* TBR */
break;
default:
goto finish;
}
pos++;
count--;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
uregs,
16 * sizeof(u32), 32 * sizeof(u32));
if (ret)
return ret;
if (regwindow32_set(target, regs, uregs))
return -EFAULT;
if (!count)
return 0;
}
finish:
pos *= sizeof(reg);
count *= sizeof(reg);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&psr,
32 * sizeof(u32), 33 * sizeof(u32));
if (ret)
return ret;
regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
(psr & (PSR_ICC | PSR_SYSCALL));
if (!count)
return 0;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
33 * sizeof(u32), 34 * sizeof(u32));
if (ret || !count)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->y,
34 * sizeof(u32), 35 * sizeof(u32));
if (ret || !count)
return ret;
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
38 * sizeof(reg), -1);
35 * sizeof(u32), 38 * sizeof(u32));
}
static int fpregs32_get(struct task_struct *target,


@@ -572,19 +572,13 @@ static int genregs32_get(struct task_struct *target,
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
&reg_window[pos++],
&reg, sizeof(reg),
FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
if (access_process_vm(target,
(unsigned long) u,
&reg, sizeof(reg),
FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
if (put_user(reg, u++))
return -EFAULT;
pos++;
u++;
}
}
}
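/* The simplification above is valid because "u" points into current's
 * (the tracer's) address space, where plain get_user()/put_user() work;
 * only reg_window dereferences the traced task's stack and still needs
 * access_process_vm(). */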
@@ -684,12 +678,7 @@ static int genregs32_set(struct task_struct *target,
}
} else {
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
u,
&reg, sizeof(reg),
FOLL_FORCE)
!= sizeof(reg))
if (get_user(reg, u++))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)


@@ -49,16 +49,17 @@
* Position Independent Executable (PIE) so that linker won't optimize
* R_386_GOT32X relocation to its fixed symbol address. Older
* linkers generate R_386_32 relocations against locally defined symbols,
* _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
* _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
* optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
* R_386_32 relocations when relocating the kernel. To generate
* R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
* R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
* hidden:
*/
.hidden _bss
.hidden _ebss
.hidden _got
.hidden _egot
.hidden _end
__HEAD
ENTRY(startup_32)


@@ -42,6 +42,7 @@
.hidden _ebss
.hidden _got
.hidden _egot
.hidden _end
__HEAD
.code32


@@ -57,8 +57,10 @@ static __always_inline unsigned long smap_save(void)
{
unsigned long flags;
asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
X86_FEATURE_SMAP)
asm volatile ("# smap_save\n\t"
ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
"pushf; pop %0; " __ASM_CLAC "\n\t"
"1:"
: "=rm" (flags) : : "memory", "cc");
return flags;
@@ -66,7 +68,10 @@ static __always_inline unsigned long smap_save(void)
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
asm volatile ("# smap_restore\n\t"
ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
"push %0; popf\n\t"
"1:"
: : "g" (flags) : "memory", "cc");
}
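/* In both helpers the patched-in alternative is now empty: the
 * pushf/pop (resp. push/popf) sequence is always present in the
 * instruction stream, and ALTERNATIVE() only replaces the "jmp 1f"
 * when X86_FEATURE_SMAP is set, so the sequence is executed as a whole
 * or skipped as a whole. */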


@@ -18,10 +18,13 @@
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -32,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
{}
};
@@ -50,8 +54,10 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -65,7 +71,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};


@@ -43,7 +43,7 @@ static int map_irq_stack(unsigned int cpu)
pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
}
va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
if (!va)
return -ENOMEM;


@@ -121,8 +121,6 @@ __ref void *alloc_low_pages(unsigned int num)
} else {
pfn = pgt_buf_end;
pgt_buf_end += num;
printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
}
for (i = 0; i < num; i++) {


@@ -1546,19 +1546,39 @@ skip_surplus_transfers:
if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
missed_ppm[READ] > ppm_rthr ||
missed_ppm[WRITE] > ppm_wthr) {
/* clearly missing QoS targets, slow down vrate */
ioc->busy_level = max(ioc->busy_level, 0);
ioc->busy_level++;
} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
/* take action iff there is contention */
if (nr_shortages && !nr_lagging) {
/* QoS targets are being met with >25% margin */
if (nr_shortages) {
/*
* We're throttling while the device has spare
* capacity. If vrate was being slowed down, stop.
*/
ioc->busy_level = min(ioc->busy_level, 0);
/* redistribute surpluses first */
if (!nr_surpluses)
/*
* If there are IOs spanning multiple periods, wait
* them out before pushing the device harder. If
* there are surpluses, let redistribution work it
* out first.
*/
if (!nr_lagging && !nr_surpluses)
ioc->busy_level--;
} else {
/*
* Nobody is being throttled and the users aren't
* issuing enough IOs to saturate the device. We
* simply don't know how close the device is to
* saturation. Coast.
*/
ioc->busy_level = 0;
}
} else {
/* inside the hysteresis margin, we're good */
ioc->busy_level = 0;
}
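/* Stand-alone sketch of the busy_level hysteresis above (simplified,
 * illustrative thresholds): raise the level when QoS targets are
 * missed, lower it only when targets are met with margin and someone
 * is actually being throttled, and park it at zero when there is no
 * signal either way. */
#include <stdbool.h>

static int busy_level;

static void adjust_busy_level(bool missing_targets, bool met_with_margin,
			      bool throttling, bool lagging, bool surpluses)
{
	if (missing_targets) {
		if (busy_level < 0)
			busy_level = 0;	/* was speeding up; stop that first */
		busy_level++;
	} else if (met_with_margin) {
		if (throttling) {
			if (busy_level > 0)
				busy_level = 0;
			if (!lagging && !surpluses)
				busy_level--;	/* spare capacity: speed up */
		} else {
			busy_level = 0;	/* nothing to learn from; coast */
		}
	} else {
		busy_level = 0;		/* inside the hysteresis margin */
	}
}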


@@ -2493,18 +2493,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
/*
* If tags initialization fail for some hctx,
* that hctx won't be brought online. In this
* case, remap the current ctx to hctx[0] which
* is guaranteed to always have tags allocated
*/
set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
}
ctx = per_cpu_ptr(q->queue_ctx, i);
for (j = 0; j < set->nr_maps; j++) {
@@ -2513,6 +2501,18 @@ static void blk_mq_map_swqueue(struct request_queue *q)
HCTX_TYPE_DEFAULT, i);
continue;
}
hctx_idx = set->map[j].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
/*
* If tags initialization fail for some hctx,
* that hctx won't be brought online. In this
* case, remap the current ctx to hctx[0] which
* is guaranteed to always have tags allocated
*/
set->map[j].mq_map[i] = 0;
}
hctx = blk_mq_map_queue_type(q, j, i);
ctx->hctxs[j] = hctx;
@@ -3304,8 +3304,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
prev_nr_hw_queues = set->nr_hw_queues;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
fallback:
blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
if (q->nr_hw_queues != set->nr_hw_queues) {


@@ -518,13 +518,20 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
if (info.region_node->object->region.space_id ==
ACPI_ADR_SPACE_PLATFORM_COMM
&& !(region_node->object->field.internal_pcc_buffer =
ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
length))) {
return_ACPI_STATUS(AE_NO_MEMORY);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (info.region_node->object->region.space_id ==
ACPI_ADR_SPACE_PLATFORM_COMM) {
region_node->object->field.internal_pcc_buffer =
ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
length);
if (!region_node->object->field.internal_pcc_buffer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
}
return_ACPI_STATUS(status);
}


@@ -361,6 +361,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
struct acpi_iort_pmcg *pmcg;
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
@@ -388,6 +389,10 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
return smmu->id_mapping_index;
case ACPI_IORT_NODE_PMCG:
pmcg = (struct acpi_iort_pmcg *)node->node_data;
if (pmcg->overflow_gsiv || node->mapping_count == 0)
return -EINVAL;
return 0;
default:
return -EINVAL;


@@ -94,7 +94,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
trigger = p->triggering;
} else {
gsi = pext->interrupts[0];
trigger = p->triggering;
trigger = pext->triggering;
}
irq = r.start;


@@ -329,6 +329,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x4606, "BCM4324B5" }, /* 002.006.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2122, "BCM4343A0" }, /* 001.001.034 */
@@ -343,6 +344,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
};
static const struct bcm_subver_table bcm_usb_subver_table[] = {
{ 0x2105, "BCM20703A1" }, /* 001.001.005 */
{ 0x210b, "BCM43142A0" }, /* 001.001.011 */
{ 0x2112, "BCM4314A0" }, /* 001.001.018 */
{ 0x2118, "BCM20702A0" }, /* 001.001.024 */


@@ -1015,7 +1015,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
if (btmtkuart_is_standalone(bdev)) {
err = clk_prepare_enable(bdev->osc);
if (err < 0)
return err;
goto err_hci_free_dev;
if (bdev->boot) {
gpiod_set_value_cansleep(bdev->boot, 1);
@@ -1028,10 +1028,8 @@ static int btmtkuart_probe(struct serdev_device *serdev)
/* Power on */
err = regulator_enable(bdev->vcc);
if (err < 0) {
clk_disable_unprepare(bdev->osc);
return err;
}
if (err < 0)
goto err_clk_disable_unprepare;
/* Reset if the reset-gpios is available otherwise the board
* -level design should be guaranteed.
@@ -1063,7 +1061,6 @@ static int btmtkuart_probe(struct serdev_device *serdev)
err = hci_register_dev(hdev);
if (err < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
hci_free_dev(hdev);
goto err_regulator_disable;
}
@@ -1072,6 +1069,11 @@ static int btmtkuart_probe(struct serdev_device *serdev)
err_regulator_disable:
if (btmtkuart_is_standalone(bdev))
regulator_disable(bdev->vcc);
err_clk_disable_unprepare:
if (btmtkuart_is_standalone(bdev))
clk_disable_unprepare(bdev->osc);
err_hci_free_dev:
hci_free_dev(hdev);
return err;
}


@@ -107,6 +107,7 @@ struct bcm_device {
u32 oper_speed;
int irq;
bool irq_active_low;
bool irq_acquired;
#ifdef CONFIG_PM
struct hci_uart *hu;
@@ -319,6 +320,8 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
bdev->irq_acquired = true;
device_init_wakeup(bdev->dev, true);
pm_runtime_set_autosuspend_delay(bdev->dev,
@@ -487,7 +490,7 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
pm_runtime_disable(bdev->dev);


@@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
struct clk_init_data init;
struct clk_init_data init = {};
struct clk *clk;
clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);


@@ -222,7 +222,8 @@ static int apbt_next_event(unsigned long delta,
/**
* dw_apb_clockevent_init() - use an APB timer as a clock_event_device
*
* @cpu: The CPU the events will be targeted at.
* @cpu: The CPU the events will be targeted at or -1 if CPU affiliation
* isn't required.
* @name: The name used for the timer and the IRQ for it.
* @rating: The rating to give the timer.
* @base: I/O base for the timer registers.
@@ -257,7 +258,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.max_delta_ticks = 0x7fffffff;
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.min_delta_ticks = 5000;
dw_ced->ced.cpumask = cpumask_of(cpu);
dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
dw_ced->ced.set_state_shutdown = apbt_shutdown;


@@ -147,10 +147,6 @@ static int num_called;
static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
case 0:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
@@ -161,6 +157,8 @@ static int __init dw_apb_timer_init(struct device_node *timer)
#endif
break;
default:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
break;
}


@@ -480,7 +480,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
&kdev->kobj, "state%d", i);
if (ret) {
kfree(kobj);
kobject_put(&kobj->kobj);
goto error_state;
}
cpuidle_add_s2idle_attr_group(kobj);
@@ -611,7 +611,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
&kdev->kobj, "driver");
if (ret) {
kfree(kdrv);
kobject_put(&kdrv->kobj);
return ret;
}
@@ -705,7 +705,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (error) {
kfree(kdev);
kobject_put(&kdev->kobj);
return error;
}
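/* Sketch of the rule all three hunks apply (struct and ktype here are
 * hypothetical): once kobject_init_and_add() has run, the kobject holds
 * its initial reference, so the error path must drop it with
 * kobject_put() -- which invokes the ktype's release() and frees the
 * container -- never with a bare kfree(). */
static int example_register(struct example *ex, struct kobject *parent)
{
	int ret;

	ret = kobject_init_and_add(&ex->kobj, &example_ktype, parent,
				   "example%d", ex->id);
	if (ret)
		kobject_put(&ex->kobj);	/* release() performs the kfree() */
	return ret;
}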


@@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD
config CRYPTO_DEV_SP_CCP
bool "Cryptographic Coprocessor device"
default y
depends on CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP_DD && DMADEVICES
select HW_RANDOM
select DMA_ENGINE
select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help


@@ -2818,7 +2818,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int c_id = a_ctx(tfm)->tx_chan_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)


@@ -28,8 +28,10 @@
/* Registers values */
#define CRC_CR_RESET BIT(0)
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF
#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
#define CRC_CR_REV_IN_BYTE BIT(5)
#define CRC_CR_REV_OUT BIT(7)
#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
#define CRC_AUTOSUSPEND_DELAY 50
@@ -38,8 +40,6 @@ struct stm32_crc {
struct device *dev;
void __iomem *regs;
struct clk *clk;
u8 pending_data[sizeof(u32)];
size_t nb_pending_bytes;
};
struct stm32_crc_list {
@@ -59,14 +59,13 @@ struct stm32_crc_ctx {
struct stm32_crc_desc_ctx {
u32 partial; /* crc32c: partial in first 4 bytes of that struct */
struct stm32_crc *crc;
};
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
mctx->key = 0;
mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
mctx->key = CRC32C_INIT_DEFAULT;
mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -94,32 +93,42 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
return 0;
}
static struct stm32_crc *stm32_crc_get_next_crc(void)
{
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
if (crc)
list_move_tail(&crc->list, &crc_list.dev_list);
spin_unlock_bh(&crc_list.lock);
return crc;
}
static int stm32_crc_init(struct shash_desc *desc)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
list_for_each_entry(crc, &crc_list.dev_list, list) {
ctx->crc = crc;
break;
}
spin_unlock_bh(&crc_list.lock);
crc = stm32_crc_get_next_crc();
if (!crc)
return -ENODEV;
pm_runtime_get_sync(ctx->crc->dev);
pm_runtime_get_sync(crc->dev);
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
/* Store partial result */
ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
pm_runtime_mark_last_busy(ctx->crc->dev);
pm_runtime_put_autosuspend(ctx->crc->dev);
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
return 0;
}
@@ -128,31 +137,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
unsigned int length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc *crc = ctx->crc;
u32 *d32;
unsigned int i;
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
crc = stm32_crc_get_next_crc();
if (!crc)
return -ENODEV;
pm_runtime_get_sync(crc->dev);
if (unlikely(crc->nb_pending_bytes)) {
while (crc->nb_pending_bytes != sizeof(u32) && length) {
/* Fill in pending data */
crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
/*
* Restore previously calculated CRC for this context as init value
* Restore polynomial configuration
* Configure in register for word input data,
* Configure out register in reversed bit mode data.
*/
writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
/* Configure for byte data */
writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
writeb_relaxed(*d8++, crc->regs + CRC_DR);
length--;
}
if (crc->nb_pending_bytes == sizeof(u32)) {
/* Process completed pending data */
writel_relaxed(*(u32 *)crc->pending_data,
crc->regs + CRC_DR);
crc->nb_pending_bytes = 0;
}
/* Configure for word data */
writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
}
d32 = (u32 *)d8;
for (i = 0; i < length >> 2; i++)
/* Process 32 bits data */
writel_relaxed(*(d32++), crc->regs + CRC_DR);
for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
if (length) {
/* Configure for byte data */
writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
while (length--)
writeb_relaxed(*d8++, crc->regs + CRC_DR);
}
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
@@ -160,22 +187,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
/* Check for pending data (non 32 bits) */
length &= 3;
if (likely(!length))
return 0;
if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
/* Shall not happen */
dev_err(crc->dev, "Pending data overflow\n");
return -EINVAL;
}
d8 = (const u8 *)d32;
for (i = 0; i < length; i++)
/* Store pending data */
crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
return 0;
}
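/* Stand-alone sketch of the access pattern the rewritten
 * stm32_crc_update() uses, with feed_byte()/feed_word() standing in for
 * the CRC_DR register writes: unaligned head byte-wise, aligned bulk
 * word-wise, tail byte-wise. */
#include <stddef.h>
#include <stdint.h>

static void feed_byte(uint8_t b)  { (void)b; /* writeb_relaxed(...) */ }
static void feed_word(uint32_t w) { (void)w; /* writel_relaxed(...) */ }

static void feed(const uint8_t *d8, size_t len)
{
	while (((uintptr_t)d8 & 3) && len) {	/* unaligned prefix */
		feed_byte(*d8++);
		len--;
	}
	for (; len >= 4; d8 += 4, len -= 4)	/* aligned 32-bit bulk */
		feed_word(*(const uint32_t *)d8);
	while (len--)				/* trailing bytes */
		feed_byte(*d8++);
}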
@@ -204,6 +215,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}
static unsigned int refcnt;
static DEFINE_MUTEX(refcnt_lock);
static struct shash_alg algs[] = {
/* CRC-32 */
{
@@ -294,12 +307,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
if (ret) {
dev_err(dev, "Failed to register\n");
clk_disable_unprepare(crc->clk);
return ret;
mutex_lock(&refcnt_lock);
if (!refcnt) {
ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
if (ret) {
mutex_unlock(&refcnt_lock);
dev_err(dev, "Failed to register\n");
clk_disable_unprepare(crc->clk);
return ret;
}
}
refcnt++;
mutex_unlock(&refcnt_lock);
dev_info(dev, "Initialized\n");
@@ -320,7 +339,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
mutex_lock(&refcnt_lock);
if (!--refcnt)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
mutex_unlock(&refcnt_lock);
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
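/* Sketch of the shared-registration pattern the probe/remove hunks
 * introduce (names hypothetical): the first instance registers the
 * algorithms, the last one to go away unregisters them, with the count
 * serialized by a mutex exactly like refcnt/refcnt_lock above. */
static unsigned int users;
static DEFINE_MUTEX(users_lock);

static int shared_get(void)
{
	int ret = 0;

	mutex_lock(&users_lock);
	if (!users)
		ret = do_register();	/* only the first caller registers */
	if (!ret)
		users++;
	mutex_unlock(&users_lock);
	return ret;
}

static void shared_put(void)
{
	mutex_lock(&users_lock);
	if (!--users)
		do_unregister();	/* last caller unregisters */
	mutex_unlock(&users_lock);
}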


@@ -2317,6 +2317,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M60H_CPUS] = {
.ctl_name = "F17h_M60h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M70H_CPUS] = {
.ctl_name = "F17h_M70h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
@@ -3366,6 +3375,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
break;
} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
fam_type = &family_types[F17_M60H_CPUS];
pvt->ops = &family_types[F17_M60H_CPUS].ops;
break;
} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops;


@@ -120,6 +120,8 @@
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
@@ -291,6 +293,7 @@ enum amd_families {
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
F17_M60H_CPUS,
F17_M70H_CPUS,
NUM_FAMILIES,
};


@@ -28,6 +28,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector) \
$(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
# disable LTO


@@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev)
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
if (IS_ERR(data->on_off))
if (IS_ERR(data->on_off)) {
ret = PTR_ERR(data->on_off);
goto err_put_device;
}
if (data->on_off) {
data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
GPIOD_IN);
if (IS_ERR(data->wakeup))
if (IS_ERR(data->wakeup)) {
ret = PTR_ERR(data->wakeup);
goto err_put_device;
}
ret = regulator_enable(data->vcc);
if (ret)


@@ -161,16 +161,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
int r;
long r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
tv.num_shared = 1;
tv.num_shared = 2;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +179,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
"we fail to reserve bo (%ld)\n", r);
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
if (amdgpu_vm_ready(vm)) {
struct dma_fence *fence = NULL;
amdgpu_vm_bo_rmv(adev, bo_va);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (unlikely(r)) {
dev_err(adev->dev, "failed to clear page "
"tables on GEM object close (%d)\n", r);
}
if (fence) {
amdgpu_bo_fence(bo, fence, true);
dma_fence_put(fence);
}
}
fence = dma_resv_get_excl(bo->tbo.base.resv);
if (fence) {
amdgpu_bo_fence(bo, fence, true);
fence = NULL;
}
r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r || !fence)
goto out_unlock;
amdgpu_bo_fence(bo, fence, true);
dma_fence_put(fence);
out_unlock:
if (unlikely(r < 0))
dev_err(adev->dev, "failed to clear page "
"tables on GEM object close (%ld)\n", r);
ttm_eu_backoff_reservation(&ticket, &list);
}


@@ -370,6 +370,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level)
return count;
if (adev->asic_type == CHIP_RAVEN) {
if (adev->rev_id < 8) {
if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
amdgpu_gfx_off_ctrl(adev, false);
else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
amdgpu_gfx_off_ctrl(adev, true);
}
}
/* profile_exit setting is valid only when current mode is in profile mode */
if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -416,8 +425,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
ret = smu_get_power_num_states(&adev->smu, &data);
if (ret)
return ret;
} else if (adev->powerplay.pp_funcs->get_pp_num_states)
} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
amdgpu_dpm_get_pp_num_states(adev, &data);
} else {
memset(&data, 0, sizeof(data));
}
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)


@@ -2867,10 +2867,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
if (vm->use_cpu_for_update) {
/* Sync with last SDMA update/clear before switching to CPU */
r = amdgpu_bo_sync_wait(vm->root.base.bo,
AMDGPU_FENCE_OWNER_UNDEFINED, true);
if (r)
goto free_idr;
vm->update_funcs = &amdgpu_vm_cpu_funcs;
else
} else {
vm->update_funcs = &amdgpu_vm_sdma_funcs;
}
dma_fence_put(vm->last_update);
vm->last_update = NULL;


@@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
{
switch (fs) {
case 32000:
*n = 4096;
case 48000:
case 96000:
case 192000:
*n = fs * 128 / 1000;
break;
case 44100:
*n = 6272;
break;
case 48000:
*n = 6144;
case 88200:
case 176400:
*n = fs * 128 / 900;
break;
}


@@ -940,10 +940,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
panel = NULL;
bridge = of_drm_find_bridge(child);
if (IS_ERR(bridge)) {
dev_err(dev, "failed to find bridge (%ld)\n",
PTR_ERR(bridge));
return PTR_ERR(bridge);
if (!bridge) {
dev_err(dev, "failed to find bridge\n");
return -EINVAL;
}
}
}
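/* of_drm_find_bridge() reports "not found" with NULL and never returns
 * an ERR_PTR, so the old IS_ERR() check could never fire. Sketch of the
 * two return conventions (lookup helpers hypothetical): */
struct thing *t;

t = lookup_by_null();		/* NULL on failure */
if (!t)
	return -ENODEV;

t = lookup_by_errptr();		/* ERR_PTR(-E...) on failure */
if (IS_ERR(t))
	return PTR_ERR(t);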


@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -73,6 +75,9 @@ struct mtk_dpi {
enum mtk_dpi_out_yc_map yc_map;
enum mtk_dpi_out_bit_num bit_num;
enum mtk_dpi_out_channel_swap channel_swap;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_gpio;
struct pinctrl_state *pins_dpi;
int refcount;
};
@@ -378,6 +383,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
if (--dpi->refcount != 0)
return;
if (dpi->pinctrl && dpi->pins_gpio)
pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->engine_clk);
@@ -402,6 +410,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_pixel;
}
if (dpi->pinctrl && dpi->pins_dpi)
pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
mtk_dpi_enable(dpi);
return 0;
@@ -689,6 +700,26 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(dpi->pinctrl)) {
dpi->pinctrl = NULL;
dev_dbg(&pdev->dev, "Cannot find pinctrl!\n");
}
if (dpi->pinctrl) {
dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep");
if (IS_ERR(dpi->pins_gpio)) {
dpi->pins_gpio = NULL;
dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n");
}
if (dpi->pins_gpio)
pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default");
if (IS_ERR(dpi->pins_dpi)) {
dpi->pins_dpi = NULL;
dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
}
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dpi->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(dpi->regs)) {


@@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_plane_create_alpha_property(&plane->plane);
if (type == DRM_PLANE_TYPE_PRIMARY)
continue;
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
if (type == DRM_PLANE_TYPE_PRIMARY) {
drm_plane_create_zpos_immutable_property(&plane->plane,
0);
} else {
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
}
}
return 0;


@@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rcar_du_vsp_plane_helper_funcs);
if (type == DRM_PLANE_TYPE_PRIMARY)
continue;
drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1,
vsp->num_planes - 1);
if (type == DRM_PLANE_TYPE_PRIMARY) {
drm_plane_create_zpos_immutable_property(&plane->plane,
0);
} else {
drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1,
vsp->num_planes - 1);
}
}
return 0;


@@ -67,7 +67,6 @@ static __u32 vmbus_get_next_version(__u32 current_version)
int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
int ret = 0;
unsigned int cur_cpu;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
@@ -100,24 +99,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
/*
* We want all channel messages to be delivered on CPU 0.
* This has been the behavior pre-win8. This is not
* perf issue and having all channel messages delivered on CPU 0
* would be ok.
* For post win8 hosts, we support receiving channel messagges on
* all the CPUs. This is needed for kexec to work correctly where
* the CPU attempting to connect may not be CPU 0.
*/
if (version >= VERSION_WIN8_1) {
cur_cpu = get_cpu();
msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
vmbus_connection.connect_cpu = cur_cpu;
put_cpu();
} else {
msg->target_vcpu = 0;
vmbus_connection.connect_cpu = 0;
}
msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
/*
* Add to list before we send the request since we may


@@ -249,6 +249,13 @@ int hv_synic_cleanup(unsigned int cpu)
bool channel_found = false;
unsigned long flags;
/*
* Hyper-V does not provide a way to change the connect CPU once
* it is set; we must prevent the connect CPU from going offline.
*/
if (cpu == VMBUS_CONNECT_CPU)
return -EBUSY;
/*
* Search for channels which are bound to the CPU we're about to
* cleanup. In case we find one and vmbus is still connected we need to


@@ -212,12 +212,13 @@ enum vmbus_connect_state {
#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT
struct vmbus_connection {
/*
* CPU on which the initial host contact was made.
*/
int connect_cpu;
/*
* The CPU that Hyper-V will interrupt for VMBUS messages, such as
* CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
*/
#define VMBUS_CONNECT_CPU 0
struct vmbus_connection {
u32 msg_conn_id;
atomic_t offer_in_progress;


@@ -1092,14 +1092,28 @@ void vmbus_on_msg_dpc(unsigned long data)
/*
* If we are handling the rescind message;
* schedule the work on the global work queue.
*
* The OFFER message and the RESCIND message should
* not be handled by the same serialized work queue,
* because the OFFER handler may call vmbus_open(),
* which tries to open the channel by sending an
* OPEN_CHANNEL message to the host and waits for
* the host's response; however, if the host has
* rescinded the channel before it receives the
* OPEN_CHANNEL message, the host just silently
* ignores the OPEN_CHANNEL message; as a result,
* the guest's OFFER handler hangs for ever, if we
* handle the RESCIND message in the same serialized
* work queue: the RESCIND handler can not start to
* run before the OFFER handler finishes.
*/
schedule_work_on(vmbus_connection.connect_cpu,
schedule_work_on(VMBUS_CONNECT_CPU,
&ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
atomic_inc(&vmbus_connection.offer_in_progress);
queue_work_on(vmbus_connection.connect_cpu,
queue_work_on(VMBUS_CONNECT_CPU,
vmbus_connection.work_queue,
&ctx->work);
break;
@@ -1146,7 +1160,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
INIT_WORK(&ctx->work, vmbus_onmessage_work);
queue_work_on(vmbus_connection.connect_cpu,
queue_work_on(VMBUS_CONNECT_CPU,
vmbus_connection.work_queue,
&ctx->work);
}


@@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}


@@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu)
s32 tmax;
int fmin;
/* Get PID params from the appropriate SAT */
hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
if (hdr == NULL) {
printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
return -EINVAL;
}
piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
/* Get FVT params to get Tmax; if not found, assume default */
hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL);
if (hdr) {
@@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu)
if (tmax < cpu_all_tmax)
cpu_all_tmax = tmax;
kfree(hdr);
/* Get PID params from the appropriate SAT */
hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
if (hdr == NULL) {
printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
return -EINVAL;
}
piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
/*
* Darwin has a minimum fan speed of 1000 rpm for the 4-way and
* 515 for the 2-way. That appears to be overkill, so for now,
@@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu)
pid.min = fmin;
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
kfree(hdr);
return 0;
}


@@ -789,7 +789,9 @@ static void bcache_device_free(struct bcache_device *d)
bcache_device_detach(d);
if (disk) {
if (disk->flags & GENHD_FL_UP)
bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
if (disk_added)
del_gendisk(disk);
if (disk->queue)
@@ -797,7 +799,8 @@ static void bcache_device_free(struct bcache_device *d)
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
put_disk(disk);
if (disk_added)
put_disk(disk);
}
bioset_exit(&d->bio_split);


@@ -2957,7 +2957,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
max_t(unsigned short, limits->logical_block_size, cc->sector_size);
max_t(unsigned, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
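/* Why the max_t() type matters (stand-alone sketch of the pitfall):
 * both operands are cast to the named type before comparing, so too
 * narrow a type silently truncates them. */
#include <stdio.h>

#define max_t(type, a, b) \
	((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	/* 65536 wraps to 0 as unsigned short, so the "max" is 512: */
	printf("%u\n", (unsigned)max_t(unsigned short, 512u, 65536u));
	/* with a wide enough type the answer is 65536: */
	printf("%u\n", (unsigned)max_t(unsigned, 512u, 65536u));
	return 0;
}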


@@ -7607,7 +7607,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
flush_workqueue(md_misc_wq);
if (work_pending(&mddev->del_work))
flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}


@@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num)
* of the P and Q blocks.
*/
static int scribble_alloc(struct raid5_percpu *percpu,
int num, int cnt, gfp_t flags)
int num, int cnt)
{
size_t obj_size =
sizeof(struct page *) * (num+2) +
sizeof(addr_conv_t) * (num+2);
void *scribble;
scribble = kvmalloc_array(cnt, obj_size, flags);
/*
* If here is in raid array suspend context, it is in memalloc noio
* context as well, there is no potential recursive memory reclaim
* I/Os with the GFP_KERNEL flag.
*/
scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
if (!scribble)
return -ENOMEM;
@@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
percpu = per_cpu_ptr(conf->percpu, cpu);
err = scribble_alloc(percpu, new_disks,
new_sectors / STRIPE_SECTORS,
GFP_NOIO);
new_sectors / STRIPE_SECTORS);
if (err)
break;
}
@@ -6765,8 +6769,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
conf->previous_raid_disks),
max(conf->chunk_sectors,
conf->prev_chunk_sectors)
/ STRIPE_SECTORS,
GFP_KERNEL)) {
/ STRIPE_SECTORS)) {
free_scratch_buffer(conf, percpu);
return -ENOMEM;
}
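/* Caller-side sketch of the contract the new comment relies on (helper
 * name hypothetical): the suspend path enters a NOIO scope, after which
 * GFP_KERNEL allocations inside scribble_alloc() cannot recurse into
 * I/O. */
unsigned int noio_flags;

noio_flags = memalloc_noio_save();	/* GFP_KERNEL now behaves as NOIO */
err = resize_all_scribble_buffers();	/* ends up in scribble_alloc() */
memalloc_noio_restore(noio_flags);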


@@ -1732,6 +1732,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
unsigned j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
dprintk(1, "unknown logical address type\n");
return -EINVAL;
}
if (type_mask & (1 << log_addrs->log_addr_type[i])) {
dprintk(1, "duplicate logical address type\n");
return -EINVAL;
@@ -1752,10 +1756,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
dprintk(1, "invalid primary device type\n");
return -EINVAL;
}
if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
dprintk(1, "unknown logical address type\n");
return -EINVAL;
}
for (j = 0; j < feature_sz; j++) {
if ((features[j] & 0x80) == 0) {
if (op_is_dev_features)


@@ -707,9 +707,10 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
}
if (ntuner && ndemod) {
pad_source = media_get_pad_index(tuner, true,
/* NOTE: first found tuner source pad presumed correct */
pad_source = media_get_pad_index(tuner, false,
PAD_SIGNAL_ANALOG);
if (pad_source)
if (pad_source < 0)
return -EINVAL;
ret = media_create_pad_links(mdev,
MEDIA_ENT_F_TUNER,


@@ -3068,8 +3068,8 @@ static int ov5640_probe(struct i2c_client *client)
free_ctrls:
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
entity_cleanup:
mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
mutex_destroy(&sensor->lock);
return ret;
}
@@ -3079,9 +3079,9 @@ static int ov5640_remove(struct i2c_client *client)
struct ov5640_dev *sensor = to_ov5640_dev(sd);
v4l2_async_unregister_subdev(&sensor->sd);
mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
mutex_destroy(&sensor->lock);
return 0;
}


@@ -8,6 +8,7 @@
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -21,6 +22,7 @@
struct rcar_fcp_device {
struct list_head list;
struct device *dev;
struct device_dma_parameters dma_parms;
};
static LIST_HEAD(fcp_devices);
@@ -136,6 +138,9 @@ static int rcar_fcp_probe(struct platform_device *pdev)
fcp->dev = &pdev->dev;
fcp->dev->dma_parms = &fcp->dma_parms;
dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32));
pm_runtime_enable(&pdev->dev);
mutex_lock(&fcp_lock);


@@ -2172,16 +2172,19 @@ static int vicodec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
if (register_instance(dev, &dev->stateful_enc,
"stateful-encoder", true))
ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder",
true);
if (ret)
goto unreg_dev;
if (register_instance(dev, &dev->stateful_dec,
"stateful-decoder", false))
ret = register_instance(dev, &dev->stateful_dec, "stateful-decoder",
false);
if (ret)
goto unreg_sf_enc;
if (register_instance(dev, &dev->stateless_dec,
"stateless-decoder", false))
ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder",
false);
if (ret)
goto unreg_sf_dec;
#ifdef CONFIG_MEDIA_CONTROLLER


@@ -75,24 +75,23 @@ static int si2157_init(struct dvb_frontend *fe)
struct si2157_cmd cmd;
const struct firmware *fw;
const char *fw_name;
unsigned int uitmp, chip_id;
unsigned int chip_id, xtal_trim;
dev_dbg(&client->dev, "\n");
/* Returned IF frequency is garbage when firmware is not running */
memcpy(cmd.args, "\x15\x00\x06\x07", 4);
/* Try to get Xtal trim property, to verify tuner still running */
memcpy(cmd.args, "\x15\x00\x04\x02", 4);
cmd.wlen = 4;
cmd.rlen = 4;
ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
xtal_trim = cmd.args[2] | (cmd.args[3] << 8);
if (uitmp == dev->if_frequency / 1000)
if (ret == 0 && xtal_trim < 16)
goto warm;
dev->if_frequency = 0; /* we no longer know current tuner state */
/* power up */
if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);


@@ -81,7 +81,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap)
if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
err("tuner i2c write failed.");
ret = -EREMOTEIO;
return -EREMOTEIO;
}
if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl)


@@ -236,22 +236,18 @@ int go7007_snd_init(struct go7007 *go)
gosnd->capturing = 0;
ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
&gosnd->card);
if (ret < 0) {
kfree(gosnd);
return ret;
}
if (ret < 0)
goto free_snd;
ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
&go7007_snd_device_ops);
if (ret < 0) {
kfree(gosnd);
return ret;
}
if (ret < 0)
goto free_card;
ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
if (ret < 0) {
snd_card_free(gosnd->card);
kfree(gosnd);
return ret;
}
if (ret < 0)
goto free_card;
strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname));
strscpy(gosnd->card->longname, gosnd->card->shortname,
@@ -262,11 +258,8 @@ int go7007_snd_init(struct go7007 *go)
&go7007_snd_capture_ops);
ret = snd_card_register(gosnd->card);
if (ret < 0) {
snd_card_free(gosnd->card);
kfree(gosnd);
return ret;
}
if (ret < 0)
goto free_card;
gosnd->substream = NULL;
go->snd_context = gosnd;
@@ -274,6 +267,12 @@ int go7007_snd_init(struct go7007 *go)
++dev;
return 0;
free_card:
snd_card_free(gosnd->card);
free_snd:
kfree(gosnd);
return ret;
}
EXPORT_SYMBOL(go7007_snd_init);
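/* Generic shape of the error handling this function converges on
 * (resources hypothetical): acquire in order, unwind in reverse, one
 * label per already-acquired resource, so no path frees twice or leaks. */
static int demo_init(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to unwind yet */
	ret = acquire_b();
	if (ret)
		goto err_release_a;
	ret = acquire_c();
	if (ret)
		goto err_release_b;
	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return ret;
}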


@ -246,6 +246,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
mrq = host->mrq;
if (host->cmd->error)
meson_mx_mmc_soft_reset(host);
host->mrq = NULL;
host->cmd = NULL;


@@ -87,7 +87,7 @@
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
#define ESDHC_TUNING_START_TAP_MASK 0xff
#define ESDHC_TUNING_START_TAP_MASK 0x7f
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16


@@ -1742,7 +1742,9 @@ static const struct sdhci_ops sdhci_msm_ops = {
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_msm_ops,
};


@@ -319,6 +319,8 @@ struct via_crdr_mmc_host {
/* some devices need a very long delay for power to stabilize */
#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
#define VIA_CMD_TIMEOUT_MS 1000
static const struct pci_device_id via_ids[] = {
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
@@ -551,14 +553,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host,
{
void __iomem *addrbase;
struct mmc_data *data;
unsigned int timeout_ms;
u32 cmdctrl = 0;
WARN_ON(host->cmd);
data = cmd->data;
mod_timer(&host->timer, jiffies + HZ);
host->cmd = cmd;
timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : VIA_CMD_TIMEOUT_MS;
mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
/*Command index*/
cmdctrl = cmd->opcode << 8;


@@ -1019,11 +1019,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
if (!section) {
/*
* Small-page NAND use byte 6 for BBI while large-page
* NAND use byte 0.
* NAND use bytes 0 and 1.
*/
if (cfg->page_size > 512)
oobregion->offset++;
oobregion->length--;
if (cfg->page_size > 512) {
oobregion->offset += 2;
oobregion->length -= 2;
} else {
oobregion->length--;
}
}
}


@@ -1609,13 +1609,10 @@ static int __init doc_probe(unsigned long physadr)
numchips = doc2001_init(mtd);
if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
/* DBB note: i believe nand_release is necessary here, as
/* DBB note: i believe nand_cleanup is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
/* nand_release will call mtd_device_unregister, but we
haven't yet added it. This is handled without incident by
mtd_device_unregister, as far as I can tell. */
nand_release(nand);
nand_cleanup(nand);
goto fail;
}
