This is the 5.4.235 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIyBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmQModwACgkQONu9yGCS
 aT7W7A/1EyhortcaMdZXEkdl7kZYupASsOm2QgOzeRkK0ELtbYRTt1qXdZgl40hU
 binrh5Yib2avHTEAF9I6AKVXMirSUTtODe/zQ7icyxVNcXeanlIbobEVBzSWIBtC
 Wxj129KZyCQlucagWihngQ9D+66bvD5JCsJ3EHKJjpheSqmZI88KVnOSnvyoJArj
 yLDY21UgxRN4KASgB+tpLBT4x0yN9zk8VuCGpyJjO/nHzhj6Y6DkOcx2q7hAxdn+
 H1OBCQ2QBCODCMrpW4xBuwy2blBZsRytUdEy8JsfxjgXvUp8+TdxUsuxb16a31jW
 pVo9LYB0cdKVoAzNJ2pTD8rhaATSbq+2MYDEUYCz8Rr+dZ/Nt2nTKSYeJprLsTwx
 TzPRNErQMKxKoQUQU/seWx47ebwt+Z8Rk4FAoyQMxRITw/9bBGLWpDKrGjNsByz9
 A2Q9UU+uM+jyqZnjQMvkzKSznggwfJ+SgaeqDMjwyyCQysJS8DTXPr9nA+IC9cht
 Kz00QetNgvPvZPE/gg81XOcKtJVTmA4AITQ0PlxYJT0hHCHx02GxvdPH2XBspgUt
 aNbDgVsupq8ONvRZlEf9hJKltTUmIRvI9JSOXnuhaN2jCv88SNv1M0TKfAo0XDNK
 Z/prv3qCnugMZ0KB0TD7d09XqSlKbefOq8TdtbXoTcC0NzFQkw==
 =29jZ
 -----END PGP SIGNATURE-----

Merge 5.4.235 into android11-5.4-lts

Changes in 5.4.235
	HID: asus: Remove check for same LED brightness on set
	HID: asus: use spinlock to protect concurrent accesses
	HID: asus: use spinlock to safely schedule workers
	ARM: OMAP2+: Fix memory leak in realtime_counter_init()
	arm64: dts: qcom: qcs404: use symbol names for PCIe resets
	ARM: zynq: Fix refcount leak in zynq_early_slcr_init
	arm64: dts: meson-gx: Fix Ethernet MAC address unit name
	arm64: dts: meson-g12a: Fix internal Ethernet PHY unit name
	arm64: dts: meson-gx: Fix the SCPI DVFS node name and unit address
	arm64: dts: meson: remove CPU opps below 1GHz for G12A boards
	ARM: OMAP1: call platform_device_put() in error case in omap1_dm_timer_init()
	ARM: dts: exynos: correct wr-active property in Exynos3250 Rinato
	ARM: imx: Call ida_simple_remove() for ida_simple_get
	arm64: dts: amlogic: meson-gx: fix SCPI clock dvfs node name
	arm64: dts: amlogic: meson-axg: fix SCPI clock dvfs node name
	arm64: dts: amlogic: meson-gx: add missing SCPI sensors compatible
	arm64: dts: amlogic: meson-gx: add missing unit address to rng node name
	arm64: dts: amlogic: meson-gxl: add missing unit address to eth-phy-mux node name
	arm64: dts: amlogic: meson-gxl-s905d-phicomm-n1: fix led node name
	ARM: dts: imx7s: correct iomuxc gpr mux controller cells
	arm64: dts: mediatek: mt7622: Add missing pwm-cells to pwm node
	Revert "scsi: core: run queue if SCSI device queue isn't ready and queue is idle"
	block: Limit number of items taken from the I/O scheduler in one go
	blk-mq: remove stale comment for blk_mq_sched_mark_restart_hctx
	blk-mq: wait on correct sbitmap_queue in blk_mq_mark_tag_wait
	blk-mq: correct stale comment of .get_budget
	s390/dasd: Prepare for additional path event handling
	s390/dasd: Fix potential memleak in dasd_eckd_init()
	sched/deadline,rt: Remove unused parameter from pick_next_[rt|dl]_entity()
	sched/rt: pick_next_rt_entity(): check list_entry
	block: bio-integrity: Copy flags when bio_integrity_payload is cloned
	wifi: rsi: Fix memory leak in rsi_coex_attach()
	net/wireless: Delete unnecessary checks before the macro call “dev_kfree_skb”
	wifi: iwlegacy: common: don't call dev_kfree_skb() under spin_lock_irqsave()
	wifi: libertas: fix memory leak in lbs_init_adapter()
	wifi: rtl8xxxu: don't call dev_kfree_skb() under spin_lock_irqsave()
	rtlwifi: fix -Wpointer-sign warning
	wifi: rtlwifi: Fix global-out-of-bounds bug in _rtl8812ae_phy_set_txpower_limit()
	ipw2x00: switch from 'pci_' to 'dma_' API
	wifi: ipw2x00: don't call dev_kfree_skb() under spin_lock_irqsave()
	wifi: ipw2200: fix memory leak in ipw_wdev_init()
	wilc1000: let wilc_mac_xmit() return NETDEV_TX_OK
	wifi: wilc1000: fix potential memory leak in wilc_mac_xmit()
	wifi: brcmfmac: fix potential memory leak in brcmf_netdev_start_xmit()
	wifi: brcmfmac: unmap dma buffer in brcmf_msgbuf_alloc_pktid()
	wifi: libertas_tf: don't call kfree_skb() under spin_lock_irqsave()
	wifi: libertas: if_usb: don't call kfree_skb() under spin_lock_irqsave()
	wifi: libertas: main: don't call kfree_skb() under spin_lock_irqsave()
	wifi: libertas: cmdresp: don't call kfree_skb() under spin_lock_irqsave()
	wifi: wl3501_cs: don't call kfree_skb() under spin_lock_irqsave()
	crypto: x86/ghash - fix unaligned access in ghash_setkey()
	ACPICA: Drop port I/O validation for some regions
	genirq: Fix the return type of kstat_cpu_irqs_sum()
	lib/mpi: Fix buffer overrun when SG is too long
	ACPICA: nsrepair: handle cases without a return value correctly
	wifi: orinoco: check return value of hermes_write_wordrec()
	wifi: ath9k: htc_hst: free skb in ath9k_htc_rx_msg() if there is no callback function
	ath9k: hif_usb: simplify if-if to if-else
	ath9k: htc: clean up statistics macros
	wifi: ath9k: hif_usb: clean up skbs if ath9k_hif_usb_rx_stream() fails
	wifi: ath9k: Fix potential stack-out-of-bounds write in ath9k_wmi_rsp_callback()
	ACPI: battery: Fix missing NUL-termination with large strings
	crypto: ccp - Failure on re-initialization due to duplicate sysfs filename
	crypto: essiv - remove redundant null pointer check before kfree
	crypto: essiv - Handle EBUSY correctly
	crypto: seqiv - Handle EBUSY correctly
	powercap: fix possible name leak in powercap_register_zone()
	net/mlx5: Enhance debug print in page allocation failure
	irqchip/alpine-msi: Fix refcount leak in alpine_msix_init_domains
	irqchip/irq-mvebu-gicp: Fix refcount leak in mvebu_gicp_probe
	irqchip/ti-sci: Fix refcount leak in ti_sci_intr_irq_domain_probe
	mptcp: add sk_stop_timer_sync helper
	net: add sock_init_data_uid()
	tun: tun_chr_open(): correctly initialize socket uid
	tap: tap_open(): correctly initialize socket uid
	OPP: fix error checking in opp_migrate_dentry()
	Bluetooth: L2CAP: Fix potential user-after-free
	libbpf: Fix alen calculation in libbpf_nla_dump_errormsg()
	rds: rds_rm_zerocopy_callback() correct order for list_add_tail()
	crypto: rsa-pkcs1pad - Use akcipher_request_complete
	m68k: /proc/hardware should depend on PROC_FS
	RISC-V: time: initialize hrtimer based broadcast clock event device
	usb: gadget: udc: Avoid tasklet passing a global
	treewide: Replace DECLARE_TASKLET() with DECLARE_TASKLET_OLD()
	wifi: iwl3945: Add missing check for create_singlethread_workqueue
	wifi: iwl4965: Add missing check for create_singlethread_workqueue()
	wifi: mwifiex: fix loop iterator in mwifiex_update_ampdu_txwinsize()
	crypto: crypto4xx - Call dma_unmap_page when done
	wifi: mac80211: make rate u32 in sta_set_rate_info_rx()
	thermal/drivers/hisi: Drop second sensor hi3660
	can: esd_usb: Move mislocated storage of SJA1000_ECC_SEG bits in case of a bus error
	irqchip/irq-brcmstb-l2: Set IRQ_LEVEL for level triggered interrupts
	irqchip/irq-bcm7120-l2: Set IRQ_LEVEL for level triggered interrupts
	selftests/net: Interpret UDP_GRO cmsg data as an int value
	selftest: fib_tests: Always cleanup before exit
	drm/fourcc: Add missing big-endian XRGB1555 and RGB565 formats
	drm: mxsfb: DRM_MXSFB should depend on ARCH_MXS || ARCH_MXC
	drm/bridge: megachips: Fix error handling in i2c_register_driver()
	drm/vc4: dpi: Add option for inverting pixel clock and output enable
	drm/vc4: dpi: Fix format mapping for RGB565
	gpu: ipu-v3: common: Add of_node_put() for reference returned by of_graph_get_port_by_id()
	drm/msm/hdmi: Add missing check for alloc_ordered_workqueue
	pinctrl: stm32: Fix refcount leak in stm32_pctrl_get_irq_domain
	ASoC: fsl_sai: initialize is_dsp_mode flag
	ALSA: hda/ca0132: minor fix for allocation size
	drm/mipi-dsi: Fix byte order of 16-bit DCS set/get brightness
	drm/msm: use strscpy instead of strncpy
	drm/msm/dpu: Add check for cstate
	drm/msm/dpu: Add check for pstates
	drm/exynos: Don't reset bridge->next
	drm/bridge: Rename bridge helpers targeting a bridge chain
	drm/bridge: Introduce drm_bridge_get_next_bridge()
	drm: Initialize struct drm_crtc_state.no_vblank from device settings
	drm/msm/mdp5: Add check for kzalloc
	gpu: host1x: Don't skip assigning syncpoints to channels
	drm/mediatek: remove cast to pointers passed to kfree
	drm/mediatek: Use NULL instead of 0 for NULL pointer
	drm/mediatek: Drop unbalanced obj unref
	drm/mediatek: Clean dangling pointer on bind error path
	ASoC: soc-compress.c: fixup private_data on snd_soc_new_compress()
	gpio: vf610: connect GPIO label to dev name
	hwmon: (ltc2945) Handle error case in ltc2945_value_store
	scsi: aic94xx: Add missing check for dma_map_single()
	spi: bcm63xx-hsspi: fix pm_runtime
	spi: bcm63xx-hsspi: Fix multi-bit mode setting
	hwmon: (mlxreg-fan) Return zero speed for broken fan
	dm: remove flush_scheduled_work() during local_exit()
	spi: synquacer: Fix timeout handling in synquacer_spi_transfer_one()
	ASoC: dapm: declare missing structure prototypes
	ASoC: soc-dapm.h: fixup warning struct snd_pcm_substream not declared
	HID: bigben: use spinlock to protect concurrent accesses
	HID: bigben_worker() remove unneeded check on report_field
	HID: bigben: use spinlock to safely schedule workers
	HID: asus: Only set EV_REP if we are adding a mapping
	HID: asus: Add report_size to struct asus_touchpad_info
	HID: asus: Add support for multi-touch touchpad on Medion Akoya E1239T
	HID: asus: Fix mute and touchpad-toggle keys on Medion Akoya E1239T
	hid: bigben_probe(): validate report count
	nfsd: fix race to check ls_layouts
	cifs: Fix lost destroy smbd connection when MR allocate failed
	cifs: Fix warning and UAF when destroy the MR list
	gfs2: jdata writepage fix
	perf llvm: Fix inadvertent file creation
	perf tools: Fix auto-complete on aarch64
	sparc: allow PM configs for sparc32 COMPILE_TEST
	selftests/ftrace: Fix bash specific "==" operator
	mfd: pcf50633-adc: Fix potential memleak in pcf50633_adc_async_read()
	clk: qcom: gcc-qcs404: disable gpll[04]_out_aux parents
	clk: qcom: gcc-qcs404: fix names of the DSI clocks used as parents
	mtd: rawnand: sunxi: Fix the size of the last OOB region
	clk: renesas: cpg-mssr: Fix use after free if cpg_mssr_common_init() failed
	clk: renesas: cpg-mssr: Use enum clk_reg_layout instead of a boolean flag
	clk: renesas: cpg-mssr: Remove superfluous check in resume code
	Input: ads7846 - don't report pressure for ads7845
	Input: ads7846 - don't check penirq immediately for 7845
	clk: qcom: gpucc-sdm845: fix clk_dis_wait being programmed for CX GDSC
	powerpc/powernv/ioda: Skip unallocated resources when mapping to PE
	clk: Honor CLK_OPS_PARENT_ENABLE in clk_core_is_enabled()
	powerpc/pseries/lpar: add missing RTAS retry status handling
	powerpc/pseries/lparcfg: add missing RTAS retry status handling
	powerpc/rtas: make all exports GPL
	powerpc/rtas: ensure 4KB alignment for rtas_data_buf
	powerpc/eeh: Small refactor of eeh_handle_normal_event()
	powerpc/eeh: Set channel state after notifying the drivers
	MIPS: SMP-CPS: fix build error when HOTPLUG_CPU not set
	MIPS: vpe-mt: drop physical_memsize
	remoteproc: qcom_q6v5_mss: Use a carveout to authenticate modem headers
	media: platform: ti: Add missing check for devm_regulator_get
	powerpc: Remove linker flag from KBUILD_AFLAGS
	media: ov5675: Fix memleak in ov5675_init_controls()
	media: i2c: ov772x: Fix memleak in ov772x_probe()
	media: rc: Fix use-after-free bugs caused by ene_tx_irqsim()
	media: i2c: ov7670: 0 instead of -EINVAL was returned
	media: usb: siano: Fix use after free bugs caused by do_submit_urb
	rpmsg: glink: Avoid infinite loop on intent for missing channel
	udf: Define EFSCORRUPTED error code
	ARM: dts: exynos: Use Exynos5420 compatible for the MIPI video phy
	blk-iocost: fix divide by 0 error in calc_lcoefs()
	wifi: brcmfmac: Fix potential stack-out-of-bounds in brcmf_c_preinit_dcmds()
	rcu: Suppress smp_processor_id() complaint in synchronize_rcu_expedited_wait()
	thermal: intel: Fix unsigned comparison with less than zero
	timers: Prevent union confusion from unexpected restart_syscall()
	x86/bugs: Reset speculation control settings on init
	wifi: brcmfmac: ensure CLM version is null-terminated to prevent stack-out-of-bounds
	wifi: mt7601u: fix an integer underflow
	inet: fix fast path in __inet_hash_connect()
	ice: add missing checks for PF vsi type
	ACPI: Don't build ACPICA with '-Os'
	net: bcmgenet: Add a check for oversized packets
	m68k: Check syscall_trace_enter() return code
	wifi: mt76: dma: free rx_head in mt76_dma_rx_cleanup
	ACPI: video: Fix Lenovo Ideapad Z570 DMI match
	net/mlx5: fw_tracer: Fix debug print
	coda: Avoid partial allocation of sig_inputArgs
	uaccess: Add minimum bounds check on kernel buffer size
	drm/amd/display: Fix potential null-deref in dm_resume
	drm/omap: dsi: Fix excessive stack usage
	HID: Add Mapping for System Microphone Mute
	drm/radeon: free iio for atombios when driver shutdown
	drm/msm/dsi: Add missing check for alloc_ordered_workqueue
	docs/scripts/gdb: add necessary make scripts_gdb step
	ASoC: kirkwood: Iterate over array indexes instead of using pointer math
	regulator: max77802: Bounds check regulator id against opmode
	regulator: s5m8767: Bounds check id indexing into arrays
	hwmon: (coretemp) Simplify platform device handling
	pinctrl: at91: use devm_kasprintf() to avoid potential leaks
	drm: panel-orientation-quirks: Add quirk for Lenovo IdeaPad Duet 3 10IGL5
	dm thin: add cond_resched() to various workqueue loops
	dm cache: add cond_resched() to various workqueue loops
	nfsd: zero out pointers after putting nfsd_files on COPY setup error
	wifi: rtl8xxxu: fixing transmisison failure for rtl8192eu
	firmware: coreboot: framebuffer: Ignore reserved pixel color bits
	rtc: pm8xxx: fix set-alarm race
	ipmi_ssif: Rename idle state and check
	s390: discard .interp section
	s390/kprobes: fix irq mask clobbering on kprobe reenter from post_handler
	s390/kprobes: fix current_kprobe never cleared after kprobes reenter
	ARM: dts: exynos: correct HDMI phy compatible in Exynos4
	hfs: fix missing hfs_bnode_get() in __hfs_bnode_create
	fs: hfsplus: fix UAF issue in hfsplus_put_super
	f2fs: fix information leak in f2fs_move_inline_dirents()
	f2fs: fix cgroup writeback accounting with fs-layer encryption
	ocfs2: fix defrag path triggering jbd2 ASSERT
	ocfs2: fix non-auto defrag path not working issue
	udf: Truncate added extents on failed expansion
	udf: Do not bother merging very long extents
	udf: Do not update file length for failed writes to inline files
	udf: Preserve link count of system files
	udf: Detect system inodes linked into directory hierarchy
	udf: Fix file corruption when appending just after end of preallocated extent
	KVM: Destroy target device if coalesced MMIO unregistration fails
	KVM: s390: disable migration mode when dirty tracking is disabled
	x86/virt: Force GIF=1 prior to disabling SVM (for reboot flows)
	x86/crash: Disable virt in core NMI crash handler to avoid double shootdown
	x86/reboot: Disable virtualization in an emergency if SVM is supported
	x86/reboot: Disable SVM, not just VMX, when stopping CPUs
	x86/kprobes: Fix __recover_optprobed_insn check optimizing logic
	x86/kprobes: Fix arch_check_optimized_kprobe check within optimized_kprobe range
	x86/microcode/amd: Remove load_microcode_amd()'s bsp parameter
	x86/microcode/AMD: Add a @cpu parameter to the reloading functions
	x86/microcode/AMD: Fix mixed steppings support
	x86/speculation: Allow enabling STIBP with legacy IBRS
	Documentation/hw-vuln: Document the interaction between IBRS and STIBP
	ima: Align ima_file_mmap() parameters with mmap_file LSM hook
	irqdomain: Fix association race
	irqdomain: Fix disassociation race
	irqdomain: Drop bogus fwspec-mapping error handling
	ALSA: ice1712: Do not left ice->gpio_mutex locked in aureon_add_controls()
	ALSA: hda/realtek: Add quirk for HP EliteDesk 800 G6 Tower PC
	ext4: optimize ea_inode block expansion
	ext4: refuse to create ea block when umounted
	wifi: rtl8xxxu: Use a longer retry limit of 48
	wifi: cfg80211: Fix use after free for wext
	thermal: intel: powerclamp: Fix cur_state for multi package system
	dm flakey: fix logic when corrupting a bio
	dm flakey: don't corrupt the zero page
	ARM: dts: exynos: correct TMU phandle in Exynos4
	ARM: dts: exynos: correct TMU phandle in Odroid XU
	rbd: avoid use-after-free in do_rbd_add() when rbd_dev_create() fails
	alpha: fix FEN fault handling
	mips: fix syscall_get_nr
	media: ipu3-cio2: Fix PM runtime usage_count in driver unbind
	mm: memcontrol: deprecate charge moving
	mm/thp: check and bail out if page in deferred queue already
	ktest.pl: Give back console on Ctrt^C on monitor
	ktest.pl: Fix missing "end_monitor" when machine check fails
	ktest.pl: Add RUN_TIMEOUT option with default unlimited
	scsi: qla2xxx: Fix link failure in NPIV environment
	scsi: qla2xxx: Fix DMA-API call trace on NVMe LS requests
	scsi: qla2xxx: Fix erroneous link down
	scsi: ses: Don't attach if enclosure has no components
	scsi: ses: Fix slab-out-of-bounds in ses_enclosure_data_process()
	scsi: ses: Fix possible addl_desc_ptr out-of-bounds accesses
	scsi: ses: Fix possible desc_ptr out-of-bounds accesses
	scsi: ses: Fix slab-out-of-bounds in ses_intf_remove()
	PCI/PM: Observe reset delay irrespective of bridge_d3
	PCI: hotplug: Allow marking devices as disconnected during bind/unbind
	PCI: Avoid FLR for AMD FCH AHCI adapters
	drm/i915/quirks: Add inverted backlight quirk for HP 14-r206nv
	drm/radeon: Fix eDP for single-display iMac11,2
	wifi: ath9k: use proper statements in conditionals
	kbuild: Port silent mode detection to future gnu make.
	net/sched: Retire tcindex classifier
	fs/jfs: fix shift exponent db_agl2size negative
	pwm: sifive: Reduce time the controller lock is held
	pwm: sifive: Always let the first pwm_apply_state succeed
	pwm: stm32-lp: fix the check on arr and cmp registers update
	f2fs: use memcpy_{to,from}_page() where possible
	fs: f2fs: initialize fsdata in pagecache_write()
	um: vector: Fix memory leak in vector_config
	ubi: ensure that VID header offset + VID header size <= alloc, size
	ubifs: Fix build errors as symbol undefined
	ubifs: Rectify space budget for ubifs_symlink() if symlink is encrypted
	ubifs: Rectify space budget for ubifs_xrename()
	ubifs: Fix wrong dirty space budget for dirty inode
	ubifs: do_rename: Fix wrong space budget when target inode's nlink > 1
	ubifs: Reserve one leb for each journal head while doing budget
	ubi: Fix use-after-free when volume resizing failed
	ubi: Fix unreferenced object reported by kmemleak in ubi_resize_volume()
	ubifs: Fix memory leak in alloc_wbufs()
	ubi: Fix possible null-ptr-deref in ubi_free_volume()
	ubifs: Re-statistic cleaned znode count if commit failed
	ubifs: dirty_cow_znode: Fix memleak in error handling path
	ubifs: ubifs_writepage: Mark page dirty after writing inode failed
	ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
	ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
	x86: um: vdso: Add '%rcx' and '%r11' to the syscall clobber list
	watchdog: at91sam9_wdt: use devm_request_irq to avoid missing free_irq() in error path
	watchdog: Fix kmemleak in watchdog_cdev_register
	watchdog: pcwd_usb: Fix attempting to access uninitialized memory
	netfilter: ctnetlink: fix possible refcount leak in ctnetlink_create_conntrack()
	ipv6: Add lwtunnel encap size of all siblings in nexthop calculation
	sctp: add a refcnt in sctp_stream_priorities to avoid a nested loop
	net: fix __dev_kfree_skb_any() vs drop monitor
	9p/xen: fix version parsing
	9p/xen: fix connection sequence
	9p/rdma: unmap receive dma buffer in rdma_request()/post_recv()
	net/mlx5: Geneve, Fix handling of Geneve object id as error code
	nfc: fix memory leak of se_io context in nfc_genl_se_io
	net/sched: act_sample: fix action bind logic
	ARM: dts: spear320-hmi: correct STMPE GPIO compatible
	tcp: tcp_check_req() can be called from process context
	vc_screen: modify vcs_size() handling in vcs_read()
	rtc: sun6i: Make external 32k oscillator optional
	rtc: sun6i: Always export the internal oscillator
	scsi: ipr: Work around fortify-string warning
	thermal: intel: quark_dts: fix error pointer dereference
	thermal: intel: BXT_PMIC: select REGMAP instead of depending on it
	tracing: Add NULL checks for buffer in ring_buffer_free_read_page()
	firmware/efi sysfb_efi: Add quirk for Lenovo IdeaPad Duet 3
	mfd: arizona: Use pm_runtime_resume_and_get() to prevent refcnt leak
	media: uvcvideo: Handle cameras with invalid descriptors
	media: uvcvideo: Handle errors from calls to usb_string
	media: uvcvideo: Quirk for autosuspend in Logitech B910 and C910
	media: uvcvideo: Silence memcpy() run-time false positive warnings
	staging: emxx_udc: Add checks for dma_alloc_coherent()
	tty: fix out-of-bounds access in tty_driver_lookup_tty()
	tty: serial: fsl_lpuart: disable the CTS when send break signal
	mei: bus-fixup:upon error print return values of send and receive
	tools/iio/iio_utils:fix memory leak
	iio: accel: mma9551_core: Prevent uninitialized variable in mma9551_read_status_word()
	iio: accel: mma9551_core: Prevent uninitialized variable in mma9551_read_config_word()
	usb: host: xhci: mvebu: Iterate over array indexes instead of using pointer math
	USB: ene_usb6250: Allocate enough memory for full object
	usb: uvc: Enumerate valid values for color matching
	kernel/fail_function: fix memory leak with using debugfs_lookup()
	PCI: Add ACS quirk for Wangxun NICs
	phy: rockchip-typec: Fix unsigned comparison with less than zero
	net: tls: avoid hanging tasks on the tx_lock
	x86/resctrl: Apply READ_ONCE/WRITE_ONCE to task_struct.{rmid,closid}
	x86/resctl: fix scheduler confusion with 'current'
	Bluetooth: hci_sock: purge socket queues in the destruct() callback
	tcp: Fix listen() regression in 5.4.229.
	media: uvcvideo: Provide sync and async uvc_ctrl_status_event
	media: uvcvideo: Fix race condition with usb_kill_urb
	dt-bindings: rtc: sun6i-a31-rtc: Loosen the requirements on the clocks
	Linux 5.4.235

Change-Id: I256ca8288bf61707f5103c9b7c7831da0d7a08a0
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 0d9d32f54c
Greg Kroah-Hartman <gregkh@google.com>, 2023-03-12 15:42:52 +00:00
354 changed files with 2762 additions and 2413 deletions

@@ -82,6 +82,8 @@ Brief summary of control files.
memory.swappiness set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
memory.move_charge_at_immigrate set/show controls of moving charges
This knob is deprecated and shouldn't be
used.
memory.oom_control set/show oom controls.
memory.numa_stat show the number of memory usage per numa
node
@@ -745,8 +747,15 @@ NOTE2:
It is recommended to set the soft limit always below the hard limit,
otherwise the hard limit will take precedence.
8. Move charges at task migration
=================================
8. Move charges at task migration (DEPRECATED!)
===============================================
THIS IS DEPRECATED!
It's expensive and unreliable! It's better practice to launch workload
tasks directly from inside their target cgroup. Use dedicated workload
cgroups to allow fine-grained policy adjustments without having to
move physical pages between control domains.
Users can move charges associated with a task along with task migration, that
is, uncharge task's pages from the old cgroup and charge them to the new cgroup.
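As a concrete illustration of the (deprecated) interface this section
describes, a controller has historically enabled charge moving by writing a
bitmask to the knob. A minimal C sketch, assuming a cgroup-v1 memory
controller mounted at /sys/fs/cgroup/memory and a hypothetical group named
"mygroup":

    /* Enable moving of both anonymous pages (bit 0) and file pages (bit 1)
     * for tasks migrated into "mygroup". Deprecated; shown for context. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/sys/fs/cgroup/memory/mygroup/"
                      "memory.move_charge_at_immigrate", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "3", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }

Per the deprecation note above, new setups should start tasks directly in
their target cgroup rather than relying on charge moving.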

@@ -479,8 +479,16 @@ Spectre variant 2
On Intel Skylake-era systems the mitigation covers most, but not all,
cases. See :ref:`[3] <spec_ref3>` for more details.
On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
IBRS on x86), retpoline is automatically disabled at run time.
On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS
or enhanced IBRS on x86), retpoline is automatically disabled at run time.
Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
Spectre v2 variant attacks, including cross-thread branch target injections
on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
Legacy IBRS systems clear the IBRS bit on exit to userspace and
therefore explicitly enable STIBP for that
The retpoline mitigation is turned on by default on vulnerable
CPUs. It can be forced on or off by the administrator
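For reference, the mitigation state described above is exported to userspace
through sysfs. A minimal sketch that reads the kernel-reported Spectre v2
status (path as documented under /sys/devices/system/cpu/vulnerabilities):

    /* Print the active Spectre v2 mitigation line, e.g. naming retpoline
     * or eIBRS plus the current STIBP/IBPB state. */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2",
                        "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }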
@@ -504,9 +512,12 @@ Spectre variant 2
For Spectre variant 2 mitigation, individual user programs
can be compiled with return trampolines for indirect branches.
This protects them from consuming poisoned entries in the branch
target buffer left by malicious software. Alternatively, the
programs can disable their indirect branch speculation via prctl()
(See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
target buffer left by malicious software.
On legacy IBRS systems, at return to userspace, implicit STIBP is disabled
because the kernel clears the IBRS bit. In this case, the userspace programs
can disable indirect branch speculation via prctl() (See
:ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
On x86, this will turn on STIBP to guard against attacks from the
sibling thread when the user program is running, and use IBPB to
flush the branch target buffer when switching to/from the program.
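In practice, the prctl() interface referenced here is used as follows (a
minimal sketch; the constants come from the kernel's prctl ABI and are
guarded in case older libc headers lack them):

    /* Force-disable indirect branch speculation for this task; on x86 the
     * kernel then keeps STIBP set while the task runs. */
    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_INDIRECT_BRANCH 1
    #define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                  PR_SPEC_DISABLE, 0, 0)) {
            perror("prctl(PR_SET_SPECULATION_CTRL)");
            return 1;
        }
        return 0;
    }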

@@ -39,6 +39,10 @@ Setup
this mode. In this case, you should build the kernel with
CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR.
- Build the gdb scripts (required on kernels v5.1 and above)::
make scripts_gdb
- Enable the gdb stub of QEMU/KVM, either
- at VM startup time by appending "-s" to the QEMU command line

@@ -128,7 +128,6 @@ required:
- compatible
- reg
- interrupts
- clocks
- clock-output-names
additionalProperties: false

@@ -3615,6 +3615,18 @@ Type: vm ioctl
Parameters: struct kvm_s390_cmma_log (in, out)
Returns: 0 on success, a negative value on error
Errors:
====== =============================================================
ENOMEM not enough memory can be allocated to complete the task
ENXIO if CMMA is not enabled
EINVAL if KVM_S390_CMMA_PEEK is not set but migration mode was not enabled
EINVAL if KVM_S390_CMMA_PEEK is not set but dirty tracking has been
disabled (and thus migration mode was automatically disabled)
EFAULT if the userspace address is invalid or if no page table is
present for the addresses (e.g. when using hugepages).
====== =============================================================
This ioctl is used to get the values of the CMMA bits on the s390
architecture. It is meant to be used in two scenarios:
- During live migration to save the CMMA values. Live migration needs
@@ -3691,12 +3703,6 @@ mask is unused.
values points to the userspace buffer where the result will be stored.
This ioctl can fail with -ENOMEM if not enough memory can be allocated to
complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
-EFAULT if the userspace address is invalid or if no page table is
present for the addresses (e.g. when using hugepages).
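Taken together with the error table added above, a caller of
KVM_S390_GET_CMMA_BITS looks roughly like the sketch below; vm_fd is an
assumed, already-created s390 VM file descriptor and migration mode is
assumed to be active:

    /* Read up to one page worth of CMMA values starting at guest frame 0. */
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int dump_cmma(int vm_fd)
    {
        unsigned char values[4096];
        struct kvm_s390_cmma_log log = {
            .start_gfn = 0,
            .count = sizeof(values),
            .flags = 0, /* no KVM_S390_CMMA_PEEK: needs migration mode */
            .values = (__u64)(unsigned long)values,
        };

        /* Per the documentation change, -EINVAL now also covers the case
         * where dirty tracking was disabled and migration mode stopped. */
        if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0) {
            perror("KVM_S390_GET_CMMA_BITS");
            return -1;
        }
        printf("%u values read, %llu remaining\n", log.count,
               (unsigned long long)log.remaining);
        return 0;
    }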
4.108 KVM_S390_SET_CMMA_BITS
Capability: KVM_CAP_S390_CMMA_MIGRATION

@@ -254,6 +254,10 @@ Allows userspace to start migration mode, needed for PGSTE migration.
Setting this attribute when migration mode is already active will have
no effects.
Dirty tracking must be enabled on all memslots, else -EINVAL is returned. When
dirty tracking is disabled on any memslot, migration mode is automatically
stopped.
Parameters: none
Returns: -ENOMEM if there is not enough free memory to start migration mode
-EINVAL if the state of the VM is invalid (e.g. no memory defined)
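The attribute described here is driven through the VM device-attribute API.
A minimal sketch, assuming vm_fd is an open KVM VM file descriptor with
dirty logging already enabled on every memslot:

    /* Start migration mode; with this change the kernel keeps it coupled
     * to dirty tracking and returns -EINVAL if any memslot lacks it. */
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int start_migration_mode(int vm_fd)
    {
        struct kvm_device_attr attr = {
            .group = KVM_S390_VM_MIGRATION,
            .attr  = KVM_S390_VM_MIGRATION_START,
        };

        if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0) {
            perror("KVM_SET_DEVICE_ATTR");
            return -1;
        }
        return 0;
    }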

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 234
SUBLEVEL = 235
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@@ -89,9 +89,16 @@ endif
# If the user is running make -s (silent mode), suppress echoing of
# commands
# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
quiet=silent_
ifeq ($(filter 3.%,$(MAKE_VERSION)),)
silence:=$(findstring s,$(firstword -$(MAKEFLAGS)))
else
silence:=$(findstring s,$(filter-out --%,$(MAKEFLAGS)))
endif
ifeq ($(silence),s)
quiet=silent_
endif
export quiet Q KBUILD_VERBOSE

@@ -235,7 +235,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
{
int signo, code;
if ((regs->ps & ~IPL_MAX) == 0) {
if (type == 3) { /* FEN fault */
/* Irritating users can call PAL_clrfen to disable the
FPU for the process. The kernel will then trap in
do_switch_stack and undo_switch_stack when we try
to save and restore the FP registers.
Given that GCC by default generates code that uses the
FP registers, PAL_clrfen is not useful except for DoS
attacks. So turn the bleeding FPU back on and be done
with it. */
current_thread_info()->pcb.flags |= 1;
__reload_thread(&current_thread_info()->pcb);
return;
}
if (!user_mode(regs)) {
if (type == 1) {
const unsigned int *data
= (const unsigned int *) regs->pc;
@@ -368,20 +382,6 @@ do_entIF(unsigned long type, struct pt_regs *regs)
}
break;
case 3: /* FEN fault */
/* Irritating users can call PAL_clrfen to disable the
FPU for the process. The kernel will then trap in
do_switch_stack and undo_switch_stack when we try
to save and restore the FP registers.
Given that GCC by default generates code that uses the
FP registers, PAL_clrfen is not useful except for DoS
attacks. So turn the bleeding FPU back on and be done
with it. */
current_thread_info()->pcb.flags |= 1;
__reload_thread(&current_thread_info()->pcb);
return;
case 5: /* illoc */
default: /* unexpected instruction-fault type */
;

@@ -239,7 +239,7 @@
i80-if-timings {
cs-setup = <0>;
wr-setup = <0>;
wr-act = <1>;
wr-active = <1>;
wr-hold = <0>;
};
};

@@ -10,7 +10,7 @@
/ {
thermal-zones {
cpu_thermal: cpu-thermal {
thermal-sensors = <&tmu 0>;
thermal-sensors = <&tmu>;
polling-delay-passive = <0>;
polling-delay = <0>;
trips {

@@ -605,7 +605,7 @@
status = "disabled";
hdmi_i2c_phy: hdmiphy@38 {
compatible = "exynos4210-hdmiphy";
compatible = "samsung,exynos4210-hdmiphy";
reg = <0x38>;
};
};

@@ -116,7 +116,6 @@
};
&cpu0_thermal {
thermal-sensors = <&tmu_cpu0 0>;
polling-delay-passive = <0>;
polling-delay = <0>;

@@ -539,7 +539,7 @@
};
mipi_phy: mipi-video-phy {
compatible = "samsung,s5pv210-mipi-video-phy";
compatible = "samsung,exynos5420-mipi-video-phy";
syscon = <&pmu_system_controller>;
#phy-cells = <1>;
};

@@ -504,7 +504,7 @@
mux: mux-controller {
compatible = "mmio-mux";
#mux-control-cells = <0>;
#mux-control-cells = <1>;
mux-reg-masks = <0x14 0x00000010>;
};

@@ -242,7 +242,7 @@
irq-trigger = <0x1>;
stmpegpio: stmpe-gpio {
compatible = "stmpe,gpio";
compatible = "st,stmpe-gpio";
reg = <0>;
gpio-controller;
#gpio-cells = <2>;

@@ -99,6 +99,7 @@ struct mmdc_pmu {
cpumask_t cpu;
struct hrtimer hrtimer;
unsigned int active_events;
int id;
struct device *dev;
struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
struct hlist_node node;
@@ -433,8 +434,6 @@ static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
void __iomem *mmdc_base, struct device *dev)
{
int mmdc_num;
*pmu_mmdc = (struct mmdc_pmu) {
.pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
@@ -452,15 +451,16 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
.active_events = 0,
};
mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
return mmdc_num;
return pmu_mmdc->id;
}
static int imx_mmdc_remove(struct platform_device *pdev)
{
struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
perf_pmu_unregister(&pmu_mmdc->pmu);
iounmap(pmu_mmdc->mmdc_base);
@@ -474,7 +474,6 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
{
struct mmdc_pmu *pmu_mmdc;
char *name;
int mmdc_num;
int ret;
const struct of_device_id *of_id =
of_match_device(imx_mmdc_dt_ids, &pdev->dev);
@@ -497,14 +496,14 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
cpuhp_mmdc_state = ret;
}
mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
if (mmdc_num == 0)
name = "mmdc";
else
name = devm_kasprintf(&pdev->dev,
GFP_KERNEL, "mmdc%d", mmdc_num);
ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
if (ret < 0)
goto pmu_free;
name = devm_kasprintf(&pdev->dev,
GFP_KERNEL, "mmdc%d", ret);
pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
@@ -525,6 +524,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
pmu_register_err:
pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_free:

@@ -165,7 +165,7 @@ err_free_pdata:
kfree(pdata);
err_free_pdev:
platform_device_unregister(pdev);
platform_device_put(pdev);
return ret;
}

@@ -649,6 +649,7 @@ static void __init realtime_counter_init(void)
}
rate = clk_get_rate(sys_clk);
clk_put(sys_clk);
if (soc_is_dra7xx()) {
/*

@@ -213,6 +213,7 @@ int __init zynq_early_slcr_init(void)
zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr");
if (IS_ERR(zynq_slcr_regmap)) {
pr_err("%s: failed to find zynq-slcr\n", __func__);
of_node_put(np);
return -ENODEV;
}

@@ -150,7 +150,7 @@
scpi_clocks: clocks {
compatible = "arm,scpi-clocks";
scpi_dvfs: clock-controller {
scpi_dvfs: clocks-0 {
compatible = "arm,scpi-dvfs-clocks";
#clock-cells = <1>;
clock-indices = <0>;
@@ -159,7 +159,7 @@
};
scpi_sensors: sensors {
compatible = "amlogic,meson-gxbb-scpi-sensors";
compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors";
#thermal-sensor-cells = <1>;
};
};

@@ -1783,7 +1783,7 @@
#address-cells = <1>;
#size-cells = <0>;
internal_ephy: ethernet_phy@8 {
internal_ephy: ethernet-phy@8 {
compatible = "ethernet-phy-id0180.3301",
"ethernet-phy-ieee802.3-c22";
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;

@@ -54,26 +54,6 @@
compatible = "operating-points-v2";
opp-shared;
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <731000>;
};
opp-250000000 {
opp-hz = /bits/ 64 <250000000>;
opp-microvolt = <731000>;
};
opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <731000>;
};
opp-667000000 {
opp-hz = /bits/ 64 <666666666>;
opp-microvolt = <731000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;

@@ -172,7 +172,7 @@
reg = <0x14 0x10>;
};
eth_mac: eth_mac@34 {
eth_mac: eth-mac@34 {
reg = <0x34 0x10>;
};
@@ -189,7 +189,7 @@
scpi_clocks: clocks {
compatible = "arm,scpi-clocks";
scpi_dvfs: scpi_clocks@0 {
scpi_dvfs: clocks-0 {
compatible = "arm,scpi-dvfs-clocks";
#clock-cells = <1>;
clock-indices = <0>;
@@ -464,7 +464,7 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
hwrng: rng {
hwrng: rng@0 {
compatible = "amlogic,meson-rng";
reg = <0x0 0x0 0x0 0x4>;
};

@@ -18,7 +18,7 @@
leds {
compatible = "gpio-leds";
status {
led {
label = "n1:white:status";
gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
default-state = "on";

@@ -700,7 +700,7 @@
};
};
eth-phy-mux {
eth-phy-mux@55c {
compatible = "mdio-mux-mmioreg", "mdio-mux";
#address-cells = <1>;
#size-cells = <0>;

@@ -428,6 +428,7 @@
pwm: pwm@11006000 {
compatible = "mediatek,mt7622-pwm";
reg = <0 0x11006000 0 0x1000>;
#pwm-cells = <2>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_PWM_SEL>,
<&pericfg CLK_PERI_PWM_PD>,

@@ -533,7 +533,7 @@
clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
resets = <&gcc GCC_PCIEPHY_0_PHY_BCR>,
<&gcc 21>;
<&gcc GCC_PCIE_0_PIPE_ARES>;
reset-names = "phy", "pipe";
clock-output-names = "pcie_0_pipe_clk";
@@ -991,12 +991,12 @@
<&gcc GCC_PCIE_0_SLV_AXI_CLK>;
clock-names = "iface", "aux", "master_bus", "slave_bus";
resets = <&gcc 18>,
<&gcc 17>,
<&gcc 15>,
<&gcc 19>,
resets = <&gcc GCC_PCIE_0_AXI_MASTER_ARES>,
<&gcc GCC_PCIE_0_AXI_SLAVE_ARES>,
<&gcc GCC_PCIE_0_AXI_MASTER_STICKY_ARES>,
<&gcc GCC_PCIE_0_CORE_STICKY_ARES>,
<&gcc GCC_PCIE_0_BCR>,
<&gcc 16>;
<&gcc GCC_PCIE_0_AHB_ARES>;
reset-names = "axi_m",
"axi_s",
"axi_m_sticky",

@@ -47,6 +47,8 @@ do_trace:
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
addql #1,%d0
jeq ret_from_exception
movel %sp@(PT_OFF_ORIG_D0),%d1
movel #-ENOSYS,%d0
cmpl #NR_syscalls,%d1

@@ -19,6 +19,7 @@ config HEARTBEAT
# We have a dedicated heartbeat LED. :-)
config PROC_HARDWARE
bool "/proc/hardware support"
depends on PROC_FS
help
Say Y here to support the /proc/hardware file, which gives you
access to information about the machine you're running on,

@@ -92,6 +92,8 @@ ENTRY(system_call)
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
addql #1,%d0
jeq ret_from_exception
movel %d3,%a0
jbsr %a0@
movel %d0,%sp@(PT_OFF_D0) /* save the return value */

@@ -160,9 +160,12 @@ do_trace_entry:
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
addql #1,%d0 | optimization for cmpil #-1,%d0
jeq ret_from_syscall
movel %sp@(PT_OFF_ORIG_D0),%d0
cmpl #NR_syscalls,%d0
jcs syscall
jra ret_from_syscall
badsys:
movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_syscall

@@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task,
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return current_thread_info()->syscall;
return task_thread_info(task)->syscall;
}
static inline void mips_syscall_update_nr(struct task_struct *task,

@@ -104,7 +104,6 @@ struct vpe_control {
struct list_head tc_list; /* Thread contexts */
};
extern unsigned long physical_memsize;
extern struct vpe_control vpecontrol;
extern const struct file_operations vpe_fops;

@@ -423,9 +423,11 @@ static void cps_shutdown_this_cpu(enum cpu_death death)
wmb();
}
} else {
pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
}
}

@@ -92,12 +92,11 @@ int vpe_run(struct vpe *v)
write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
/*
* The sde-kit passes 'memsize' to __start in $a3, so set something
* here... Or set $a3 to zero and define DFLT_STACK_SIZE and
* DFLT_HEAP_SIZE when you compile your program
* We don't pass the memsize here, so VPE programs need to be
* compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined.
*/
mttgpr(7, 0);
mttgpr(6, v->ntcs);
mttgpr(7, physical_memsize);
/* set up VPE1 */
/*

@@ -22,12 +22,6 @@
DEFINE_SPINLOCK(ebu_lock);
EXPORT_SYMBOL_GPL(ebu_lock);
/*
* This is needed by the VPE loader code, just set it to 0 and assume
* that the firmware hardcodes this value to something useful.
*/
unsigned long physical_memsize = 0L;
/*
* this struct is filled by the soc specific detection code and holds
* information about the specific soc type, revision and name

@@ -93,7 +93,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
ifeq ($(HAS_BIARCH),y)
KBUILD_CFLAGS += -m$(BITS)
KBUILD_AFLAGS += -m$(BITS) -Wl,-a$(BITS)
KBUILD_AFLAGS += -m$(BITS)
KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION)
endif

@@ -1072,45 +1072,46 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
pr_info("EEH: Recovery successful.\n");
} else {
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
"Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
goto out;
}
eeh_slot_error_detail(pe, EEH_LOG_PERM);
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
"Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
/* Notify all devices that they're about to go down. */
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_slot_error_detail(pe, EEH_LOG_PERM);
/* Mark the PE to be removed permanently */
eeh_pe_state_mark(pe, EEH_PE_REMOVED);
/* Notify all devices that they're about to go down. */
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
/*
* Shut down the device drivers for good. We mark
* all removed devices correctly to avoid access
* the their PCI config any more.
*/
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
} else {
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
/* Mark the PE to be removed permanently */
eeh_pe_state_mark(pe, EEH_PE_REMOVED);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
/* The passed PE should no longer be used */
return;
}
/*
* Shut down the device drivers for good. We mark
* all removed devices correctly to avoid access
* the their PCI config any more.
*/
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
} else {
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
/* The passed PE should no longer be used */
return;
}
out:
@@ -1206,10 +1207,10 @@ void eeh_handle_special_event(void)
/* Notify all devices to be down */
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
eeh_pe_report(
"error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
pci_lock_rescan_remove();
list_for_each_entry(hose, &hose_list, list_node) {

@@ -51,10 +51,10 @@ struct rtas_t rtas = {
EXPORT_SYMBOL(rtas);
DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL(rtas_data_buf_lock);
EXPORT_SYMBOL_GPL(rtas_data_buf_lock);
char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
EXPORT_SYMBOL(rtas_data_buf);
char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K);
EXPORT_SYMBOL_GPL(rtas_data_buf);
unsigned long rtas_rmo_buf;
@@ -63,7 +63,7 @@ unsigned long rtas_rmo_buf;
* This is done like this so rtas_flash can be a module.
*/
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);
EXPORT_SYMBOL_GPL(rtas_flash_term_hook);
/* RTAS use home made raw locking instead of spin_lock_irqsave
* because those can be called from within really nasty contexts
@@ -311,7 +311,7 @@ void rtas_progress(char *s, unsigned short hex)
spin_unlock(&progress_lock);
}
EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */
EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */
int rtas_token(const char *service)
{
@@ -321,7 +321,7 @@ int rtas_token(const char *service)
tokp = of_get_property(rtas.dev, service, NULL);
return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_token);
EXPORT_SYMBOL_GPL(rtas_token);
int rtas_service_present(const char *service)
{
@@ -481,7 +481,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
}
return ret;
}
EXPORT_SYMBOL(rtas_call);
EXPORT_SYMBOL_GPL(rtas_call);
/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
* code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
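The delay rule in that comment translates to roughly the following helper
(an illustrative sketch, not the kernel's exact implementation):

    /* Map an RTAS extended busy status 990n to its hinted delay in ms:
     * 9900 -> 1, 9901 -> 10, ..., 9905 -> 100000 (i.e. 10^n). */
    static unsigned int rtas_busy_hint_ms(int status)
    {
        unsigned int ms = 1;
        int n = status - 9900;

        while (n-- > 0)
            ms *= 10;
        return ms;
    }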
@@ -516,7 +516,7 @@ unsigned int rtas_busy_delay(int status)
return ms;
}
EXPORT_SYMBOL(rtas_busy_delay);
EXPORT_SYMBOL_GPL(rtas_busy_delay);
static int rtas_error_rc(int rtas_rc)
{
@@ -562,7 +562,7 @@ int rtas_get_power_level(int powerdomain, int *level)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_get_power_level);
EXPORT_SYMBOL_GPL(rtas_get_power_level);
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
@@ -580,7 +580,7 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_set_power_level);
EXPORT_SYMBOL_GPL(rtas_set_power_level);
int rtas_get_sensor(int sensor, int index, int *state)
{
@@ -598,7 +598,7 @@ int rtas_get_sensor(int sensor, int index, int *state)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_get_sensor);
EXPORT_SYMBOL_GPL(rtas_get_sensor);
int rtas_get_sensor_fast(int sensor, int index, int *state)
{
@@ -659,7 +659,7 @@ int rtas_set_indicator(int indicator, int index, int new_value)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_set_indicator);
EXPORT_SYMBOL_GPL(rtas_set_indicator);
/*
* Ignoring RTAS extended delay

@@ -3008,7 +3008,8 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
int index;
int64_t rc;
if (!res || !res->flags || res->start > res->end)
if (!res || !res->flags || res->start > res->end ||
res->flags & IORESOURCE_UNSET)
return;
if (res->flags & IORESOURCE_IO) {

@@ -1416,22 +1416,22 @@ static inline void __init check_lp_set_hblkrm(unsigned int lp,
void __init pseries_lpar_read_hblkrm_characteristics(void)
{
const s32 token = rtas_token("ibm,get-system-parameter");
unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
int call_status, len, idx, bpsize;
if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
return;
spin_lock(&rtas_data_buf_lock);
memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
NULL,
SPLPAR_TLB_BIC_TOKEN,
__pa(rtas_data_buf),
RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);
do {
spin_lock(&rtas_data_buf_lock);
memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
call_status = rtas_call(token, 3, 1, NULL, SPLPAR_TLB_BIC_TOKEN,
__pa(rtas_data_buf), RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);
} while (rtas_busy_delay(call_status));
if (call_status != 0) {
pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",

@@ -289,6 +289,7 @@ static void parse_mpp_x_data(struct seq_file *m)
*/
static void parse_system_parameter_string(struct seq_file *m)
{
const s32 token = rtas_token("ibm,get-system-parameter");
int call_status;
unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
@@ -298,16 +299,15 @@ static void parse_system_parameter_string(struct seq_file *m)
return;
}
spin_lock(&rtas_data_buf_lock);
memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
NULL,
SPLPAR_CHARACTERISTICS_TOKEN,
__pa(rtas_data_buf),
RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);
do {
spin_lock(&rtas_data_buf_lock);
memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
call_status = rtas_call(token, 3, 1, NULL, SPLPAR_CHARACTERISTICS_TOKEN,
__pa(rtas_data_buf), RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);
} while (rtas_busy_delay(call_status));
if (call_status != 0) {
printk(KERN_INFO

@@ -5,6 +5,7 @@
*/
#include <linux/of_clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>
@@ -28,4 +29,6 @@ void __init time_init(void)
of_clk_init(NULL);
timer_probe();
tick_setup_hrtimer_broadcast();
}

@@ -255,6 +255,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->prev_kprobe.kp = NULL;
}
NOKPROBE_SYMBOL(pop_kprobe);
@@ -509,12 +510,11 @@ static int post_kprobe_handler(struct pt_regs *regs)
if (!p)
return 0;
resume_execution(p, regs);
if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
resume_execution(p, regs);
pop_kprobe(kcb);
preempt_enable_no_resched();

@@ -189,5 +189,6 @@ SECTIONS
DISCARDS
/DISCARD/ : {
*(.eh_frame)
*(.interp)
}
}

@@ -4527,6 +4527,22 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
return -EINVAL;
if (!kvm->arch.migration_mode)
return 0;
/*
* Turn off migration mode when:
* - userspace creates a new memslot with dirty logging off,
* - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
* dirty logging is turned off.
* Migration mode expects dirty page logging being enabled to store
* its dirty bitmap.
*/
if (change != KVM_MR_DELETE &&
!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
WARN(kvm_s390_vm_stop_migration(kvm),
"Failed to stop migration mode");
return 0;
}

@@ -321,7 +321,7 @@ config FORCE_MAX_ZONEORDER
This config option is actually maximum order plus one. For example,
a value of 13 means that the largest free memory block is 2^12 pages.
if SPARC64
if SPARC64 || COMPILE_TEST
source "kernel/power/Kconfig"
endif

@@ -746,6 +746,7 @@ static int vector_config(char *str, char **error_out)
if (parsed == NULL) {
*error_out = "vector_config failed to parse parameters";
kfree(params);
return -EINVAL;
}

@@ -19,6 +19,7 @@
#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
@@ -54,7 +55,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
be128 *x = (be128 *)key;
u64 a, b;
if (keylen != GHASH_BLOCK_SIZE) {
@@ -63,8 +63,8 @@ }
}
/* perform multiplication by 'x' in GF(2^128) */
a = be64_to_cpu(x->a);
b = be64_to_cpu(x->b);
a = get_unaligned_be64(key);
b = get_unaligned_be64(key + 8);
ctx->shash.a = (b << 1) | (a >> 63);
ctx->shash.b = (a << 1) | (b >> 63);

@@ -131,7 +131,7 @@ static inline unsigned int x86_cpuid_family(void)
int __init microcode_init(void);
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
void reload_early_microcode(unsigned int cpu);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
extern bool initrd_gone;
void microcode_bsp_resume(void);
@@ -139,7 +139,7 @@ void microcode_bsp_resume(void);
static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
static inline void reload_early_microcode(unsigned int cpu) { }
static inline void microcode_bsp_resume(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }

@@ -47,12 +47,12 @@ struct microcode_amd {
extern void __init load_ucode_amd_bsp(unsigned int family);
extern void load_ucode_amd_ap(unsigned int family);
extern int __init save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(void);
void reload_ucode_amd(unsigned int cpu);
#else
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
static inline void load_ucode_amd_ap(unsigned int family) {}
static inline int __init
save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
void reload_ucode_amd(void) {}
static inline void reload_ucode_amd(unsigned int cpu) {}
#endif
#endif /* _ASM_X86_MICROCODE_AMD_H */

@@ -50,6 +50,10 @@
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
/* A mask for bits which the kernel toggles when controlling mitigations */
#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
| SPEC_CTRL_RRSBA_DIS_S)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */

@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
#define MRR_BIOS 0
#define MRR_APM 1
void cpu_emergency_disable_virtualization(void);
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
void nmi_panic_self_stop(struct pt_regs *regs);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);

@@ -51,24 +51,27 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* simple as possible.
* Must be called with preemption disabled.
*/
static void __resctrl_sched_in(void)
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 closid = state->default_closid;
u32 rmid = state->default_rmid;
u32 tmp;
/*
* If this task has a closid/rmid assigned, use it.
* Else use the closid/rmid assigned to this cpu.
*/
if (static_branch_likely(&rdt_alloc_enable_key)) {
if (current->closid)
closid = current->closid;
tmp = READ_ONCE(tsk->closid);
if (tmp)
closid = tmp;
}
if (static_branch_likely(&rdt_mon_enable_key)) {
if (current->rmid)
rmid = current->rmid;
tmp = READ_ONCE(tsk->rmid);
if (tmp)
rmid = tmp;
}
if (closid != state->cur_closid || rmid != state->cur_rmid) {
@@ -78,15 +81,15 @@ }
}
}
static inline void resctrl_sched_in(void)
static inline void resctrl_sched_in(struct task_struct *tsk)
{
if (static_branch_likely(&rdt_enable_key))
__resctrl_sched_in();
__resctrl_sched_in(tsk);
}
#else
static inline void resctrl_sched_in(void) {}
static inline void resctrl_sched_in(struct task_struct *tsk) {}
#endif /* CONFIG_X86_CPU_RESCTRL */

@@ -120,7 +120,21 @@ static inline void cpu_svm_disable(void)
wrmsrl(MSR_VM_HSAVE_PA, 0);
rdmsrl(MSR_EFER, efer);
wrmsrl(MSR_EFER, efer & ~EFER_SVME);
if (efer & EFER_SVME) {
/*
* Force GIF=1 prior to disabling SVM to ensure INIT and NMI
* aren't blocked, e.g. if a fatal error occurred between CLGI
* and STGI. Note, STGI may #UD if SVM is disabled from NMI
* context between reading EFER and executing STGI. In that
* case, GIF must already be set, otherwise the NMI would have
* been blocked, so just eat the fault.
*/
asm_volatile_goto("1: stgi\n\t"
_ASM_EXTABLE(1b, %l[fault])
::: "memory" : fault);
fault:
wrmsrl(MSR_EFER, efer & ~EFER_SVME);
}
}
/** Makes sure SVM is disabled, if it is supported on the CPU

@@ -135,9 +135,17 @@ void __init check_bugs(void)
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
* init code as it is not enumerated and depends on the family.
*/
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
/*
* Previously running kernel (kexec), may have some controls
* turned ON. Clear them and let the mitigations setup below
* rediscover them based on configuration.
*/
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
}
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
@@ -975,14 +983,18 @@ spectre_v2_parse_user_cmdline(void)
return SPECTRE_V2_USER_CMD_AUTO;
}
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
return mode == SPECTRE_V2_IBRS ||
mode == SPECTRE_V2_EIBRS ||
return mode == SPECTRE_V2_EIBRS ||
mode == SPECTRE_V2_EIBRS_RETPOLINE ||
mode == SPECTRE_V2_EIBRS_LFENCE;
}
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}
static void __init
spectre_v2_user_select_mitigation(void)
{
@@ -1045,12 +1057,19 @@ spectre_v2_user_select_mitigation(void)
}
/*
* If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
* STIBP is not required.
* If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
* is not required.
*
* Enhanced IBRS also protects against cross-thread branch target
* injection in user-mode as the IBRS bit remains always set which
* implicitly enables cross-thread protections. However, in legacy IBRS
* mode, the IBRS bit is set only on kernel entry and cleared on return
* to userspace. This disables the implicit cross-thread protection,
* so allow for STIBP to be selected in that case.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!smt_possible ||
spectre_v2_in_ibrs_mode(spectre_v2_enabled))
spectre_v2_in_eibrs_mode(spectre_v2_enabled))
return;
/*
@@ -2113,7 +2132,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
static char *stibp_state(void)
{
if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
return "";
switch (spectre_v2_user_stibp) {
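The split predicates can be read in isolation; a self-contained sketch follows, with the enum values as illustrative stand-ins for the real kernel definitions.

#include <stdbool.h>
#include <stdio.h>

enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_IBRS,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
};

/* eIBRS modes are a strict subset of the IBRS modes, so the broader
 * check is expressed in terms of the narrower one. */
static bool in_eibrs_mode(enum spectre_v2_mitigation m)
{
	return m == SPECTRE_V2_EIBRS ||
	       m == SPECTRE_V2_EIBRS_RETPOLINE ||
	       m == SPECTRE_V2_EIBRS_LFENCE;
}

static bool in_ibrs_mode(enum spectre_v2_mitigation m)
{
	return in_eibrs_mode(m) || m == SPECTRE_V2_IBRS;
}

int main(void)
{
	/* Legacy IBRS is "in IBRS mode" but not "in eIBRS mode", which
	 * is exactly why STIBP may still be selected for it. */
	printf("ibrs=%d eibrs=%d\n",
	       in_ibrs_mode(SPECTRE_V2_IBRS),
	       in_eibrs_mode(SPECTRE_V2_IBRS));
	return 0;
}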


@ -55,7 +55,9 @@ struct cont_desc {
};
static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
/* One blob per node. */
static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
/*
* Microcode patch container file is prepended to the initrd in cpio
@ -429,7 +431,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
new_rev = &ucode_new_rev;
patch = &amd_ucode_patch;
patch = &amd_ucode_patch[0];
#endif
desc.cpuid_1_eax = cpuid_1_eax;
@ -548,8 +550,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
}
static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
@ -567,19 +568,19 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
if (!desc.mc)
return -EINVAL;
ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret > UCODE_UPDATED)
return -EINVAL;
return 0;
}
void reload_ucode_amd(void)
void reload_ucode_amd(unsigned int cpu)
{
struct microcode_amd *mc;
u32 rev, dummy;
struct microcode_amd *mc;
mc = (struct microcode_amd *)amd_ucode_patch;
mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@ -845,9 +846,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
return UCODE_OK;
}
static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
struct cpuinfo_x86 *c;
unsigned int nid, cpu;
struct ucode_patch *p;
enum ucode_state ret;
@ -860,23 +862,23 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
return ret;
}
p = find_patch(0);
if (!p) {
return ret;
} else {
if (boot_cpu_data.microcode >= p->patch_id)
return ret;
for_each_node(nid) {
cpu = cpumask_first(cpumask_of_node(nid));
c = &cpu_data(cpu);
p = find_patch(cpu);
if (!p)
continue;
if (c->microcode >= p->patch_id)
continue;
ret = UCODE_NEW;
memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
}
/* save BSP's matching patch for early load */
if (!save)
return ret;
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
return ret;
}
@ -901,12 +903,11 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
struct cpuinfo_x86 *c = &cpu_data(cpu);
bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
enum ucode_state ret = UCODE_NFOUND;
const struct firmware *fw;
/* reload ucode container only on the boot cpu */
if (!refresh_fw || !bsp)
if (!refresh_fw)
return UCODE_OK;
if (c->x86 >= 0x15)
@ -921,7 +922,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
if (!verify_container(fw->data, fw->size, false))
goto fw_release;
ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
ret = load_microcode_amd(c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);
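A toy user-space model of the per-node bookkeeping introduced above: one saved patch blob per NUMA node, selected via the CPU's node id. The topology and buffer sizes are faked for illustration.

#include <stdio.h>
#include <string.h>

#define MAX_NUMNODES	4
#define PATCH_MAX_SIZE	16

static unsigned char amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];

/* Fake topology: pretend two CPUs per node. */
static int cpu_to_node(unsigned int cpu)
{
	return cpu / 2;
}

/* Save one patch blob per node, as load_microcode_amd() now does. */
static void save_patch(int nid, const void *data, size_t size)
{
	memset(amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
	memcpy(amd_ucode_patch[nid], data,
	       size < PATCH_MAX_SIZE ? size : PATCH_MAX_SIZE);
}

int main(void)
{
	save_patch(cpu_to_node(3), "node1-patch", sizeof("node1-patch"));
	printf("cpu 3 -> node %d: %s\n", cpu_to_node(3),
	       (char *)amd_ucode_patch[cpu_to_node(3)]);
	return 0;
}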


@ -322,7 +322,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
#endif
}
void reload_early_microcode(void)
void reload_early_microcode(unsigned int cpu)
{
int vendor, family;
@ -336,7 +336,7 @@ void reload_early_microcode(void)
break;
case X86_VENDOR_AMD:
if (family >= 0x10)
reload_ucode_amd();
reload_ucode_amd(cpu);
break;
default:
break;
@ -782,7 +782,7 @@ void microcode_bsp_resume(void)
if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
else if (!uci->mc)
reload_early_microcode();
reload_early_microcode(cpu);
}
static struct syscore_ops mc_syscore_ops = {


@ -311,7 +311,7 @@ static void update_cpu_closid_rmid(void *info)
* executing task might have its own closid selected. Just reuse
* the context switch code.
*/
resctrl_sched_in();
resctrl_sched_in(current);
}
/*
@ -532,7 +532,7 @@ static void _update_task_closid_rmid(void *task)
* Otherwise, the MSR is updated when the task is scheduled in.
*/
if (task == current)
resctrl_sched_in();
resctrl_sched_in(task);
}
static void update_task_closid_rmid(struct task_struct *t)
@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/
if (rdtgrp->type == RDTCTRL_GROUP) {
tsk->closid = rdtgrp->closid;
tsk->rmid = rdtgrp->mon.rmid;
WRITE_ONCE(tsk->closid, rdtgrp->closid);
WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
} else if (rdtgrp->type == RDTMON_GROUP) {
if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
} else {
rdt_last_cmd_puts("Can't move task to different control group\n");
return -EINVAL;
@ -2177,8 +2177,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
for_each_process_thread(p, t) {
if (!from || is_closid_match(t, from) ||
is_rmid_match(t, from)) {
t->closid = to->closid;
t->rmid = to->mon.rmid;
WRITE_ONCE(t->closid, to->closid);
WRITE_ONCE(t->rmid, to->mon.rmid);
/*
* Order the closid/rmid stores above before the loads
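The WRITE_ONCE()/READ_ONCE() pairing matters because closid/rmid are now read locklessly at context-switch time. A minimal user-space stand-in for those macros; the kernel versions handle additional sizes and checks.

#include <stdint.h>
#include <stdio.h>

/* Single-access stand-ins for the kernel macros: the volatile cast
 * forces exactly one load or store, so a concurrent reader observes
 * either the old or the new value, never a torn one. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile const __typeof__(x) *)&(x))

struct task { uint32_t closid; uint32_t rmid; };

int main(void)
{
	struct task t = { 0, 0 };

	WRITE_ONCE(t.closid, 3);
	WRITE_ONCE(t.rmid, 7);
	printf("closid=%u rmid=%u\n", READ_ONCE(t.closid), READ_ONCE(t.rmid));
	return 0;
}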


@ -37,7 +37,6 @@
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>
@ -94,15 +93,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
*/
cpu_crash_vmclear_loaded_vmcss();
/* Disable VMX or SVM if needed.
*
* We need to disable virtualization on all CPUs.
* Having VMX or SVM enabled on any CPU may break rebooting
* after the kdump kernel has finished its task.
*/
cpu_emergency_vmxoff();
cpu_emergency_svm_disable();
/*
* Disable Intel PT to stop its logging
*/
@ -161,12 +151,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
*/
cpu_crash_vmclear_loaded_vmcss();
/* Booting kdump kernel with VMX or SVM enabled won't work,
* because (among other limitations) we can't disable paging
* with the virt flags.
*/
cpu_emergency_vmxoff();
cpu_emergency_svm_disable();
cpu_emergency_disable_virtualization();
/*
* Disable Intel PT to stop its logging


@ -43,8 +43,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
/* This function only handles jump-optimized kprobe */
if (kp && kprobe_optimized(kp)) {
op = container_of(kp, struct optimized_kprobe, kp);
/* If op->list is not empty, op is under optimizing */
if (list_empty(&op->list))
/* If op is optimized or under unoptimizing */
if (list_empty(&op->list) || optprobe_queued_unopt(op))
goto found;
}
}
@ -314,7 +314,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
for (i = 1; i < op->optinsn.size; i++) {
p = get_kprobe(op->kp.addr + i);
if (p && !kprobe_disabled(p))
if (p && !kprobe_disarmed(p))
return -EEXIST;
}


@ -293,7 +293,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
switch_fpu_finish(next_p);
/* Load the Intel cache allocation PQR MSR. */
resctrl_sched_in();
resctrl_sched_in(next_p);
return prev_p;
}


@ -610,7 +610,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/* Load the Intel cache allocation PQR MSR. */
resctrl_sched_in();
resctrl_sched_in(next_p);
return prev_p;
}


@ -528,33 +528,29 @@ static inline void kb_wait(void)
}
}
static void vmxoff_nmi(int cpu, struct pt_regs *regs)
{
cpu_emergency_vmxoff();
}
static inline void nmi_shootdown_cpus_on_restart(void);
/* Use NMIs as IPIs to tell all CPUs to disable virtualization */
static void emergency_vmx_disable_all(void)
static void emergency_reboot_disable_virtualization(void)
{
/* Just make sure we won't change CPUs while doing this */
local_irq_disable();
/*
* Disable VMX on all CPUs before rebooting, otherwise we risk hanging
* the machine, because the CPU blocks INIT when it's in VMX root.
* Disable virtualization on all CPUs before rebooting to avoid hanging
* the system, as VMX and SVM block INIT when running in the host.
*
* We can't take any locks and we may be on an inconsistent state, so
* use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
* use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt.
*
* Do the NMI shootdown even if VMX is off on _this_ CPU, as that
* doesn't prevent a different CPU from being in VMX root operation.
* Do the NMI shootdown even if virtualization is off on _this_ CPU, as
* other CPUs may have virtualization enabled.
*/
if (cpu_has_vmx()) {
/* Safely force _this_ CPU out of VMX root operation. */
__cpu_emergency_vmxoff();
if (cpu_has_vmx() || cpu_has_svm(NULL)) {
/* Safely force _this_ CPU out of VMX/SVM operation. */
cpu_emergency_disable_virtualization();
/* Halt and exit VMX root operation on the other CPUs. */
nmi_shootdown_cpus(vmxoff_nmi);
/* Disable VMX/SVM and halt on other CPUs. */
nmi_shootdown_cpus_on_restart();
}
}
@ -591,7 +587,7 @@ static void native_machine_emergency_restart(void)
unsigned short mode;
if (reboot_emergency)
emergency_vmx_disable_all();
emergency_reboot_disable_virtualization();
tboot_shutdown(TB_SHUTDOWN_REBOOT);
@ -796,6 +792,17 @@ void machine_crash_shutdown(struct pt_regs *regs)
/* This is the CPU performing the emergency shutdown work. */
int crashing_cpu = -1;
/*
* Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
* reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
* GIF=0, i.e. if the crash occurred between CLGI and STGI.
*/
void cpu_emergency_disable_virtualization(void)
{
cpu_emergency_vmxoff();
cpu_emergency_svm_disable();
}
#if defined(CONFIG_SMP)
static nmi_shootdown_cb shootdown_callback;
@ -818,7 +825,14 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
return NMI_HANDLED;
local_irq_disable();
shootdown_callback(cpu, regs);
if (shootdown_callback)
shootdown_callback(cpu, regs);
/*
* Prepare the CPU for reboot _after_ invoking the callback so that the
* callback can safely use virtualization instructions, e.g. VMCLEAR.
*/
cpu_emergency_disable_virtualization();
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
@ -829,18 +843,32 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
return NMI_HANDLED;
}
/*
* Halt all other CPUs, calling the specified function on each of them
/**
* nmi_shootdown_cpus - Stop other CPUs via NMI
* @callback: Optional callback to be invoked from the NMI handler
*
* This function can be used to halt all other CPUs on crash
* or emergency reboot time. The function passed as parameter
* will be called inside a NMI handler on all CPUs.
* The NMI handler on the remote CPUs invokes @callback, if not
* NULL, first and then disables virtualization to ensure that
* INIT is recognized during reboot.
*
* nmi_shootdown_cpus() can only be invoked once. After the first
* invocation all other CPUs are stuck in crash_nmi_callback() and
* cannot respond to a second NMI.
*/
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
unsigned long msecs;
local_irq_disable();
/*
* Avoid certain doom if a shootdown already occurred; re-registering
* the NMI handler will cause list corruption, modifying the callback
* will do who knows what, etc...
*/
if (WARN_ON_ONCE(crash_ipi_issued))
return;
/* Make a note of crashing cpu. Will be used in NMI callback. */
crashing_cpu = safe_smp_processor_id();
@ -868,7 +896,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
msecs--;
}
/* Leave the nmi callback set */
/*
* Leave the nmi callback set, shootdown is a one-time thing. Clearing
* the callback could result in a NULL pointer dereference if a CPU
* (finally) responds after the timeout expires.
*/
}
static inline void nmi_shootdown_cpus_on_restart(void)
{
if (!crash_ipi_issued)
nmi_shootdown_cpus(NULL);
}
/*
@ -898,6 +936,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
/* No other CPUs to shoot down */
}
static inline void nmi_shootdown_cpus_on_restart(void) { }
void run_crash_ipi_callback(struct pt_regs *regs)
{
}
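The one-shot guard in isolation: a second shootdown is refused because the first pass leaves remote CPUs parked in the NMI handler. Plain C sketch with the kernel machinery stubbed out; names and the callback shape are simplified.

#include <stdbool.h>
#include <stdio.h>

static bool crash_ipi_issued;

static void shootdown(void (*callback)(int cpu))
{
	/* WARN_ON_ONCE() in the kernel: re-registering the NMI handler
	 * after a shootdown would corrupt state, so refuse re-entry. */
	if (crash_ipi_issued) {
		fprintf(stderr, "shootdown already issued, refusing\n");
		return;
	}
	crash_ipi_issued = true;
	if (callback)		/* optional per-CPU work, e.g. VMCLEAR */
		callback(0);
	/* ...send NMIs and wait; remote CPUs disable virt and halt... */
}

static void shootdown_on_restart(void)
{
	if (!crash_ipi_issued)
		shootdown(NULL);	/* NULL: just disable virt and halt */
}

int main(void)
{
	shootdown_on_restart();		/* first caller wins */
	shootdown_on_restart();		/* silent no-op thereafter */
	return 0;
}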


@ -31,7 +31,7 @@
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>
#include <asm/virtext.h>
#include <asm/reboot.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
@ -121,7 +121,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
return NMI_HANDLED;
cpu_emergency_vmxoff();
cpu_emergency_disable_virtualization();
stop_this_cpu(NULL);
return NMI_HANDLED;
@ -134,7 +134,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
asmlinkage __visible void smp_reboot_interrupt(void)
{
ipi_entering_ack_irq();
cpu_emergency_vmxoff();
cpu_emergency_disable_virtualization();
stop_this_cpu(NULL);
irq_exit();
}


@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"Lenovo ideapad D330-10IGM"),
},
},
{
/* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
"IdeaPad Duet 3 10IGL5"),
},
},
{},
};


@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
long ret;
asm("syscall" : "=a" (ret) :
"0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
asm("syscall"
: "=a" (ret)
: "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
: "rcx", "r11", "memory");
return ret;
}
@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
long ret;
asm("syscall" : "=a" (ret) :
"0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
asm("syscall"
: "=a" (ret)
: "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
: "rcx", "r11", "memory");
return ret;
}
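A runnable user-space version of the fixed inline asm, for x86-64 Linux: the syscall instruction itself overwrites rcx (return RIP) and r11 (saved RFLAGS), so both must appear in the clobber list or the compiler is free to keep live values in them across the call.

#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>

static long my_clock_gettime(clockid_t clock, struct timespec *ts)
{
	long ret;

	asm volatile("syscall"
		     : "=a" (ret)
		     : "0" (SYS_clock_gettime), "D" (clock), "S" (ts)
		     : "rcx", "r11", "memory");
	return ret;
}

int main(void)
{
	struct timespec ts;

	if (my_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}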


@ -424,6 +424,7 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
bip->bip_vcnt = bip_src->bip_vcnt;
bip->bip_iter = bip_src->bip_iter;
bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;
return 0;
}


@ -785,9 +785,14 @@ static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
*page = *seqio = *randio = 0;
if (bps)
*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
if (bps) {
u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
if (bps_pages)
*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
else
*page = 1;
}
if (seqiops) {
v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
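Why the bps_pages check is needed: DIV_ROUND_UP_ULL's rounding addition can wrap for huge bps values, yielding a zero divisor. A user-space demonstration, with IOC_PAGE_SIZE and the macro re-declared locally for illustration.

#include <stdint.h>
#include <stdio.h>

#define IOC_PAGE_SIZE		4096ULL
#define DIV_ROUND_UP_ULL(x, y)	(((x) + (y) - 1) / (y))

int main(void)
{
	/* Near UINT64_MAX the rounding addition wraps, so the page count
	 * comes out as 0 and the follow-on division would divide by
	 * zero; hence the bps_pages check in the hunk above. */
	uint64_t bps = UINT64_MAX;
	uint64_t bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);

	printf("bps_pages = %llu\n", (unsigned long long)bps_pages);
	return 0;
}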


@ -59,8 +59,7 @@ void blk_mq_sched_assign_ioc(struct request *rq)
}
/*
* Mark a hardware queue as needing a restart. For shared queues, maintain
* a count of how many hardware queues are marked for restart.
* Mark a hardware queue as needing a restart.
*/
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
@ -92,13 +91,17 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
/*
* Only SCSI implements .get_budget and .put_budget, and SCSI restarts
* its queue by itself in its completion handler, so we don't need to
* restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
* restart queue if .get_budget() fails to get the budget.
*
* Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
* be run again. This is necessary to avoid starving flushes.
*/
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
LIST_HEAD(rq_list);
int ret = 0;
do {
struct request *rq;
@ -106,6 +109,11 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
break;
if (!list_empty_careful(&hctx->dispatch)) {
ret = -EAGAIN;
break;
}
if (!blk_mq_get_dispatch_budget(hctx))
break;
@ -122,6 +130,8 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
*/
list_add(&rq->queuelist, &rq_list);
} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
return ret;
}
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
@ -138,17 +148,26 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
/*
* Only SCSI implements .get_budget and .put_budget, and SCSI restarts
* its queue by itself in its completion handler, so we don't need to
* restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
* restart queue if .get_budget() fails to get the budget.
*
* Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
* be run again. This is necessary to avoid starving flushes.
*/
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
LIST_HEAD(rq_list);
struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
int ret = 0;
do {
struct request *rq;
if (!list_empty_careful(&hctx->dispatch)) {
ret = -EAGAIN;
break;
}
if (!sbitmap_any_bit_set(&hctx->ctx_map))
break;
@ -174,21 +193,17 @@ static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
WRITE_ONCE(hctx->dispatch_from, ctx);
return ret;
}
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
int ret = 0;
LIST_HEAD(rq_list);
/* RCU or SRCU read lock is needed before checking quiesced flag */
if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
return;
hctx->run++;
/*
* If we have previous entries on our dispatch list, grab them first for
* more fair dispatch.
@ -217,19 +232,41 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
blk_mq_sched_mark_restart_hctx(hctx);
if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
if (has_sched_dispatch)
blk_mq_do_dispatch_sched(hctx);
ret = blk_mq_do_dispatch_sched(hctx);
else
blk_mq_do_dispatch_ctx(hctx);
ret = blk_mq_do_dispatch_ctx(hctx);
}
} else if (has_sched_dispatch) {
blk_mq_do_dispatch_sched(hctx);
ret = blk_mq_do_dispatch_sched(hctx);
} else if (hctx->dispatch_busy) {
/* dequeue request one by one from sw queue if queue is busy */
blk_mq_do_dispatch_ctx(hctx);
ret = blk_mq_do_dispatch_ctx(hctx);
} else {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
blk_mq_dispatch_rq_list(q, &rq_list, false);
}
return ret;
}
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
/* RCU or SRCU read lock is needed before checking quiesced flag */
if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
return;
hctx->run++;
/*
* A return of -EAGAIN is an indication that hctx->dispatch is not
* empty and we must run again in order to avoid starving flushes.
*/
if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
blk_mq_run_hw_queue(hctx, true);
}
}
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
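The rerun contract in miniature: a dispatch pass returns -EAGAIN when it deliberately deferred work because hctx->dispatch was non-empty, and the caller reacts by kicking the queue again rather than dropping the work. All block-layer state is stubbed out for illustration.

#include <errno.h>
#include <stdio.h>

static int deferred = 2;	/* stand-in for entries on hctx->dispatch */

/* Returns -EAGAIN while work remains deferred, mirroring
 * __blk_mq_sched_dispatch_requests(). */
static int dispatch_pass(void)
{
	if (deferred) {
		deferred--;
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	/* Mirrors blk_mq_sched_dispatch_requests(): rerun until the
	 * deferred work drains, so flushes are not starved. */
	while (dispatch_pass() == -EAGAIN)
		printf("re-running hw queue\n");
	return 0;
}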


@ -1116,7 +1116,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
struct sbitmap_queue *sbq;
struct wait_queue_head *wq;
wait_queue_entry_t *wait;
bool ret;
@ -1139,6 +1139,10 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
if (!list_empty_careful(&wait->entry))
return false;
if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
sbq = &hctx->tags->breserved_tags;
else
sbq = &hctx->tags->bitmap_tags;
wq = &bt_wait_ptr(sbq, hctx)->wait;
spin_lock_irq(&wq->lock);


@ -188,8 +188,12 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err)
struct aead_request *req = areq->data;
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
if (rctx->assoc)
kfree(rctx->assoc);
if (err == -EINPROGRESS)
goto out;
kfree(rctx->assoc);
out:
aead_request_complete(req, err);
}
@ -265,7 +269,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
err = enc ? crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
if (rctx->assoc && err != -EINPROGRESS)
if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY)
kfree(rctx->assoc);
return err;
}
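A sketch of the completion-callback rule these crypto fixes enforce: a request that reports -EINPROGRESS or -EBUSY will invoke its callback again on final completion, so per-request resources must not be freed on the first notification. The request shape and flow below are simplified stand-ins.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct req { void *assoc; };

static void complete_cb(struct req *r, int err)
{
	if (err == -EINPROGRESS || err == -EBUSY)
		return;			/* will fire again: keep assoc */
	free(r->assoc);			/* final completion: free once */
	r->assoc = NULL;
	printf("completed with %d\n", err);
}

int main(void)
{
	struct req r = { .assoc = malloc(16) };

	complete_cb(&r, -EINPROGRESS);	/* backlogged: nothing freed */
	complete_cb(&r, 0);		/* done: freed exactly once */
	return 0;
}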


@ -213,16 +213,14 @@ static void pkcs1pad_encrypt_sign_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS)
return;
goto out;
async_req.data = req->base.data;
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags;
req->base.complete(&async_req,
pkcs1pad_encrypt_sign_complete(req, err));
err = pkcs1pad_encrypt_sign_complete(req, err);
out:
akcipher_request_complete(req, err);
}
static int pkcs1pad_encrypt(struct akcipher_request *req)
@ -331,15 +329,14 @@ static void pkcs1pad_decrypt_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS)
return;
goto out;
async_req.data = req->base.data;
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags;
req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
err = pkcs1pad_decrypt_complete(req, err);
out:
akcipher_request_complete(req, err);
}
static int pkcs1pad_decrypt(struct akcipher_request *req)
@ -511,15 +508,14 @@ static void pkcs1pad_verify_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS)
return;
goto out;
async_req.data = req->base.data;
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags;
req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
err = pkcs1pad_verify_complete(req, err);
out:
akcipher_request_complete(req, err);
}
/*


@ -25,7 +25,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
if (err == -EINPROGRESS)
if (err == -EINPROGRESS || err == -EBUSY)
return;
if (err)


@ -3,7 +3,7 @@
# Makefile for ACPICA Core interpreter
#
ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA
ccflags-y := -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace


@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
*
* The table is used to implement the Microsoft port access rules that
* first appeared in Windows XP. Some ports are always illegal, and some
* ports are only illegal if the BIOS calls _OSI with a win_XP string or
* later (meaning that the BIOS itself is post-XP.)
* ports are only illegal if the BIOS calls _OSI with nothing newer than
* the specific _OSI strings.
*
* This provides ACPICA with the desired port protections and
* Microsoft compatibility.
@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
acpi_gbl_osi_data == port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
"Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),


@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
* Try to fix if there was no return object. Warning if failed to fix.
*/
if (!return_object) {
if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
if (expected_btypes) {
if (!(expected_btypes & ACPI_RTYPE_NONE) &&
package_index != ACPI_NOT_PACKAGE_ELEMENT) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
if (ACPI_SUCCESS(status)) {
return (AE_OK); /* Repair was successful */
}
} else {
}
if (expected_btypes != ACPI_RTYPE_NONE) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
"Missing expected return value"));
return (AE_AML_NO_RETURN_VALUE);
}
return (AE_AML_NO_RETURN_VALUE);
}
}


@ -465,7 +465,7 @@ static int extract_package(struct acpi_battery *battery,
u8 *ptr = (u8 *)battery + offsets[i].offset;
if (element->type == ACPI_TYPE_STRING ||
element->type == ACPI_TYPE_BUFFER)
strncpy(ptr, element->string.pointer, 32);
strscpy(ptr, element->string.pointer, 32);
else if (element->type == ACPI_TYPE_INTEGER) {
strncpy(ptr, (u8 *)&element->integer.value,
sizeof(u64));
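A minimal user-space model of the strscpy() property the hunk above relies on: unlike strncpy(), it always NUL-terminates and reports truncation with -E2BIG. This is a sketch, not the kernel implementation.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -E2BIG;
	len = strnlen(src, size);
	if (len == size) {		/* source too long: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);	/* fits: copy including the NUL */
	return (long)len;
}

int main(void)
{
	char buf[8];

	my_strscpy(buf, "battery-model-string", sizeof(buf));
	printf("'%s'\n", buf);		/* always terminated: 'battery' */
	return 0;
}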


@ -316,7 +316,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
.ident = "Lenovo Ideapad Z570",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
},
},
{


@ -5529,8 +5529,7 @@ static void rbd_dev_release(struct device *dev)
module_put(THIS_MODULE);
}
static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
struct rbd_spec *spec)
static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
@ -5575,9 +5574,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->dev.parent = &rbd_root_dev;
device_initialize(&rbd_dev->dev);
rbd_dev->rbd_client = rbdc;
rbd_dev->spec = spec;
return rbd_dev;
}
@ -5590,12 +5586,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
{
struct rbd_device *rbd_dev;
rbd_dev = __rbd_dev_create(rbdc, spec);
rbd_dev = __rbd_dev_create(spec);
if (!rbd_dev)
return NULL;
rbd_dev->opts = opts;
/* get an id and fill in device name */
rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
minor_to_rbd_dev_id(1 << MINORBITS),
@ -5612,6 +5606,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* we have a ref from do_rbd_add() */
__module_get(THIS_MODULE);
rbd_dev->rbd_client = rbdc;
rbd_dev->spec = spec;
rbd_dev->opts = opts;
dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
return rbd_dev;
@ -6827,7 +6825,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
goto out_err;
}
parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
parent = __rbd_dev_create(rbd_dev->parent_spec);
if (!parent) {
ret = -ENOMEM;
goto out_err;
@ -6837,8 +6835,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
* Images related by parent/child relationships always share
* rbd_client and spec/parent_spec, so bump their refcounts.
*/
__rbd_get_client(rbd_dev->rbd_client);
rbd_spec_get(rbd_dev->parent_spec);
parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
parent->spec = rbd_spec_get(rbd_dev->parent_spec);
ret = rbd_dev_image_probe(parent, depth);
if (ret < 0)


@ -97,7 +97,7 @@
#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
enum ssif_intf_state {
SSIF_NORMAL,
SSIF_IDLE,
SSIF_GETTING_FLAGS,
SSIF_GETTING_EVENTS,
SSIF_CLEARING_FLAGS,
@ -105,8 +105,8 @@ enum ssif_intf_state {
/* FIXME - add watchdog stuff. */
};
#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
&& (ssif)->curr_msg == NULL)
#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
&& (ssif)->curr_msg == NULL)
/*
* Indexes into stats[] in ssif_info below.
@ -353,9 +353,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
* message lock. Note that the caller will check SSIF_IDLE and start a
* new operation, so there is no need to check for new messages to
* start in here.
* message lock. Note that the caller will check IS_SSIF_IDLE and
* start a new operation, so there is no need to check for new
* messages to start in here.
*/
static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@ -372,7 +372,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
if (start_send(ssif_info, msg, 3) != 0) {
/* Error, just go to normal state. */
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
}
}
@ -387,7 +387,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
mb[1] = IPMI_GET_MSG_FLAGS_CMD;
if (start_send(ssif_info, mb, 2) != 0)
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
}
static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
@ -398,7 +398,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->curr_msg = NULL;
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
ipmi_free_smi_msg(msg);
}
@ -412,7 +412,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@ -435,7 +435,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@ -453,9 +453,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
* message lock. Note that the caller will check SSIF_IDLE and start a
* new operation, so there is no need to check for new messages to
* start in here.
* message lock. Note that the caller will check IS_SSIF_IDLE and
* start a new operation, so there is no need to check for new
* messages to start in here.
*/
static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@ -471,7 +471,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
/* Events available. */
start_event_fetch(ssif_info, flags);
else {
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
}
}
@ -584,7 +584,7 @@ static void watch_timeout(struct timer_list *t)
if (ssif_info->watch_timeout) {
mod_timer(&ssif_info->watch_timer,
jiffies + ssif_info->watch_timeout);
if (SSIF_IDLE(ssif_info)) {
if (IS_SSIF_IDLE(ssif_info)) {
start_flag_fetch(ssif_info, flags); /* Releases lock */
return;
}
@ -787,7 +787,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
switch (ssif_info->ssif_state) {
case SSIF_NORMAL:
case SSIF_IDLE:
ipmi_ssif_unlock_cond(ssif_info, flags);
if (!msg)
break;
@ -805,7 +805,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
* Error fetching flags, or invalid length,
* just give up for now.
*/
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Error getting flags: %d %d, %x\n",
@ -840,7 +840,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
"Invalid response clearing flags: %x %x\n",
data[0], data[1]);
}
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
break;
@ -918,7 +918,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
if (ssif_info->req_events)
start_event_fetch(ssif_info, flags);
else if (ssif_info->req_flags)
@ -1092,7 +1092,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
unsigned long oflags;
restart:
if (!SSIF_IDLE(ssif_info)) {
if (!IS_SSIF_IDLE(ssif_info)) {
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@ -1315,7 +1315,7 @@ static void shutdown_ssif(void *send_info)
dev_set_drvdata(&ssif_info->client->dev, NULL);
/* make sure the driver is not looking for flags any more. */
while (ssif_info->ssif_state != SSIF_NORMAL)
while (ssif_info->ssif_state != SSIF_IDLE)
schedule_timeout(1);
ssif_info->stopping = true;
@ -1886,7 +1886,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
spin_lock_init(&ssif_info->lock);
ssif_info->ssif_state = SSIF_NORMAL;
ssif_info->ssif_state = SSIF_IDLE;
timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
timer_setup(&ssif_info->watch_timer, watch_timeout, 0);


@ -253,6 +253,17 @@ static bool clk_core_is_enabled(struct clk_core *core)
}
}
/*
* This could be called with the enable lock held, or from atomic
* context. If the parent isn't enabled already, we can't do
* anything here. We can also assume this clock isn't enabled.
*/
if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
if (!clk_core_is_enabled(core->parent)) {
ret = false;
goto done;
}
ret = core->ops->is_enabled(core->hw);
done:
if (core->rpm_enabled)


@ -25,11 +25,9 @@ enum {
P_CORE_BI_PLL_TEST_SE,
P_DSI0_PHY_PLL_OUT_BYTECLK,
P_DSI0_PHY_PLL_OUT_DSICLK,
P_GPLL0_OUT_AUX,
P_GPLL0_OUT_MAIN,
P_GPLL1_OUT_MAIN,
P_GPLL3_OUT_MAIN,
P_GPLL4_OUT_AUX,
P_GPLL4_OUT_MAIN,
P_GPLL6_OUT_AUX,
P_HDMI_PHY_PLL_CLK,
@ -109,28 +107,24 @@ static const char * const gcc_parent_names_4[] = {
static const struct parent_map gcc_parent_map_5[] = {
{ P_XO, 0 },
{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
{ P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_5[] = {
"cxo",
"dsi0pll_byteclk_src",
"gpll0_out_aux",
"dsi0pllbyte",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_6[] = {
{ P_XO, 0 },
{ P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
{ P_GPLL0_OUT_AUX, 3 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_6[] = {
"cxo",
"dsi0_phy_pll_out_byteclk",
"gpll0_out_aux",
"dsi0pllbyte",
"core_bi_pll_test_se",
};
@ -139,7 +133,6 @@ static const struct parent_map gcc_parent_map_7[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL3_OUT_MAIN, 2 },
{ P_GPLL6_OUT_AUX, 3 },
{ P_GPLL4_OUT_AUX, 4 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
@ -148,7 +141,6 @@ static const char * const gcc_parent_names_7[] = {
"gpll0_out_main",
"gpll3_out_main",
"gpll6_out_aux",
"gpll4_out_aux",
"core_bi_pll_test_se",
};
@ -175,7 +167,7 @@ static const struct parent_map gcc_parent_map_9[] = {
static const char * const gcc_parent_names_9[] = {
"cxo",
"gpll0_out_main",
"dsi0_phy_pll_out_dsiclk",
"dsi0pll",
"gpll6_out_aux",
"core_bi_pll_test_se",
};
@ -207,14 +199,12 @@ static const char * const gcc_parent_names_11[] = {
static const struct parent_map gcc_parent_map_12[] = {
{ P_XO, 0 },
{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
{ P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_12[] = {
"cxo",
"dsi0pll_pclk_src",
"gpll0_out_aux",
"dsi0pll",
"core_bi_pll_test_se",
};
@ -237,40 +227,34 @@ static const char * const gcc_parent_names_13[] = {
static const struct parent_map gcc_parent_map_14[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL4_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_14[] = {
"cxo",
"gpll0_out_main",
"gpll4_out_aux",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_15[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_15[] = {
"cxo",
"gpll0_out_aux",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_16[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_16[] = {
"cxo",
"gpll0_out_main",
"gpll0_out_aux",
"core_bi_pll_test_se",
};


@ -22,8 +22,6 @@
#define CX_GMU_CBCR_SLEEP_SHIFT 4
#define CX_GMU_CBCR_WAKE_MASK 0xf
#define CX_GMU_CBCR_WAKE_SHIFT 8
#define CLK_DIS_WAIT_SHIFT 12
#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT)
enum {
P_BI_TCXO,
@ -124,6 +122,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
.clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@ -221,10 +220,6 @@ static int gpu_cc_sdm845_probe(struct platform_device *pdev)
value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
regmap_update_bits(regmap, 0x1098, mask, value);
/* Configure clk_dis_wait for gpu_cx_gdsc */
regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
8 << CLK_DIS_WAIT_SHIFT);
return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
}


@ -213,7 +213,7 @@ const struct cpg_mssr_info r7s9210_cpg_mssr_info __initconst = {
.cpg_clk_register = rza2_cpg_clk_register,
/* RZ/A2 has Standby Control Registers */
.stbyctrl = true,
.reg_layout = CLK_REG_LAYOUT_RZ_A,
};
static void __init r7s9210_cpg_mssr_early_init(struct device_node *np)


@ -111,12 +111,12 @@ static const u16 srcr[] = {
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
* @base: CPG/MSSR register block base address
* @reg_layout: CPG/MSSR register layout
* @rmw_lock: protects RMW register accesses
* @np: Device node in DT for this CPG/MSSR module
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
* @stbyctrl: This device has Standby Control Registers
* @notifiers: Notifier chain to save/restore clock state for system resume
* @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
* @smstpcr_saved[].val: Saved values of SMSTPCR[]
@ -128,13 +128,13 @@ struct cpg_mssr_priv {
#endif
struct device *dev;
void __iomem *base;
enum clk_reg_layout reg_layout;
spinlock_t rmw_lock;
struct device_node *np;
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
bool stbyctrl;
struct raw_notifier_head notifiers;
struct {
@ -177,7 +177,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
enable ? "ON" : "OFF");
spin_lock_irqsave(&priv->rmw_lock, flags);
if (priv->stbyctrl) {
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
value = readb(priv->base + STBCR(reg));
if (enable)
value &= ~bitmask;
@ -199,7 +199,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
spin_unlock_irqrestore(&priv->rmw_lock, flags);
if (!enable || priv->stbyctrl)
if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
return 0;
for (i = 1000; i > 0; --i) {
@ -233,7 +233,7 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
struct cpg_mssr_priv *priv = clock->priv;
u32 value;
if (priv->stbyctrl)
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
value = readb(priv->base + STBCR(clock->index / 32));
else
value = readl(priv->base + MSTPSR(clock->index / 32));
@ -272,7 +272,7 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
case CPG_MOD:
type = "module";
if (priv->stbyctrl) {
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
idx = MOD_CLK_PACK_10(clkidx);
range_check = 7 - (clkidx % 10);
} else {
@ -800,7 +800,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
/* Save module registers with bits under our control */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
if (priv->smstpcr_saved[reg].mask)
priv->smstpcr_saved[reg].val = priv->stbyctrl ?
priv->smstpcr_saved[reg].val =
priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
readb(priv->base + STBCR(reg)) :
readl(priv->base + SMSTPCR(reg));
}
@ -830,7 +831,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
if (!mask)
continue;
if (priv->stbyctrl)
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
oldval = readb(priv->base + STBCR(reg));
else
oldval = readl(priv->base + SMSTPCR(reg));
@ -839,7 +840,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
if (newval == oldval)
continue;
if (priv->stbyctrl) {
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
writeb(newval, priv->base + STBCR(reg));
/* dummy read to ensure write has completed */
readb(priv->base + STBCR(reg));
@ -861,8 +862,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
}
if (!i)
dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
priv->stbyctrl ? "STB" : "SMSTP", reg,
dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
oldval & mask);
}
@ -907,12 +907,11 @@ static int __init cpg_mssr_common_init(struct device *dev,
goto out_err;
}
cpg_mssr_priv = priv;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
priv->stbyctrl = info->stbyctrl;
priv->reg_layout = info->reg_layout;
for (i = 0; i < nclks; i++)
priv->clks[i] = ERR_PTR(-ENOENT);
@ -921,6 +920,8 @@ static int __init cpg_mssr_common_init(struct device *dev,
if (error)
goto out_err;
cpg_mssr_priv = priv;
return 0;
out_err:
@ -990,7 +991,7 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
return error;
/* Reset Controller not supported for Standby Control SoCs */
if (info->stbyctrl)
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
return 0;
error = cpg_mssr_reset_controller_register(priv);


@ -85,6 +85,11 @@ struct mssr_mod_clk {
struct device_node;
enum clk_reg_layout {
CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
CLK_REG_LAYOUT_RZ_A,
};
/**
* SoC-specific CPG/MSSR Description
*
@ -105,6 +110,7 @@ struct device_node;
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
* @reg_layout: CPG/MSSR register layout from enum clk_reg_layout
*
* @core_pm_clks: Array with IDs of Core Clocks that are suitable for Power
* Management, in addition to Module Clocks
@ -112,10 +118,6 @@ struct device_node;
*
* @init: Optional callback to perform SoC-specific initialization
* @cpg_clk_register: Optional callback to handle special Core Clock types
*
* @stbyctrl: This device has Standby Control Registers which are 8-bits
* wide, no status registers (MSTPSR) and have different address
* offsets.
*/
struct cpg_mssr_info {
@ -130,7 +132,7 @@ struct cpg_mssr_info {
unsigned int num_core_clks;
unsigned int last_dt_core_clk;
unsigned int num_total_core_clks;
bool stbyctrl;
enum clk_reg_layout reg_layout;
/* Module Clocks */
const struct mssr_mod_clk *mod_clks;


@ -521,7 +521,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
{
struct skcipher_request *req;
struct scatterlist *dst;
dma_addr_t addr;
req = skcipher_request_cast(pd_uinfo->async_req);
@ -530,8 +529,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
addr = dma_map_page(dev->core_dev->device, sg_page(dst),
dst->offset, dst->length, DMA_FROM_DEVICE);
dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
DMA_FROM_DEVICE);
}
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
@ -556,10 +555,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct ahash_request *ahash_req;
ahash_req = ahash_request_cast(pd_uinfo->async_req);
ctx = crypto_tfm_ctx(ahash_req->base.tfm);
ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)


@ -643,14 +643,26 @@ static void ccp_dma_release(struct ccp_device *ccp)
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
if (dma_chan->client_count)
dma_release_channel(dma_chan);
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
}
static void ccp_dma_release_channels(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
struct dma_chan *dma_chan;
unsigned int i;
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
if (dma_chan->client_count)
dma_release_channel(dma_chan);
}
}
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@ -771,8 +783,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
ccp_dma_release(ccp);
ccp_dma_release_channels(ccp);
dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);


@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
fb->green_mask_pos == formats[i].green.offset &&
fb->green_mask_size == formats[i].green.length &&
fb->blue_mask_pos == formats[i].blue.offset &&
fb->blue_mask_size == formats[i].blue.length &&
fb->reserved_mask_pos == formats[i].transp.offset &&
fb->reserved_mask_size == formats[i].transp.length)
fb->blue_mask_size == formats[i].blue.length)
pdata.format = formats[i].name;
}
if (!pdata.format)


@ -304,7 +304,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc = &port->gc;
gc->of_node = np;
gc->parent = dev;
gc->label = "vf610-gpio";
gc->label = dev_name(dev);
gc->ngpio = VF610_GPIO_PER_PORT;
gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;


@ -1206,12 +1206,14 @@ static int dm_resume(void *handle)
list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->dc_link)
continue;
/*
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
if (aconnector->dc_link &&
aconnector->dc_link->type == dc_connection_mst_branch)
if (aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);


@ -426,7 +426,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
if (ret)
return ret;
return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
if (ret)
i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
return ret;
}
module_init(stdpxxxx_ge_b850v3_init);


@ -445,8 +445,9 @@ mode_fixup(struct drm_atomic_state *state)
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
&new_crtc_state->adjusted_mode);
ret = drm_bridge_chain_mode_fixup(encoder->bridge,
&new_crtc_state->mode,
&new_crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
return -EINVAL;
@ -511,7 +512,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
return ret;
}
ret = drm_bridge_mode_valid(encoder->bridge, mode);
ret = drm_bridge_chain_mode_valid(encoder->bridge, mode);
if (ret != MODE_OK) {
DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
return ret;
@ -588,6 +589,7 @@ mode_valid(struct drm_atomic_state *state)
* &drm_crtc_state.connectors_changed is set when a connector is added or
* removed from the crtc. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
* &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
@ -654,6 +656,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return -EINVAL;
}
if (drm_dev_has_vblank(dev))
new_crtc_state->no_vblank = false;
else
new_crtc_state->no_vblank = true;
}
ret = handle_conflicting_encoders(state, false);
@ -1030,7 +1037,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
drm_atomic_bridge_disable(encoder->bridge, old_state);
drm_atomic_bridge_chain_disable(encoder->bridge, old_state);
/* Right function depends upon target state. */
if (funcs) {
@ -1044,7 +1051,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
drm_atomic_bridge_post_disable(encoder->bridge, old_state);
drm_atomic_bridge_chain_post_disable(encoder->bridge,
old_state);
}
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
@ -1225,7 +1233,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->mode_set(encoder, mode, adjusted_mode);
}
drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
drm_bridge_chain_mode_set(encoder->bridge, mode,
adjusted_mode);
}
}
@ -1342,7 +1351,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
drm_atomic_bridge_pre_enable(encoder->bridge, old_state);
drm_atomic_bridge_chain_pre_enable(encoder->bridge, old_state);
if (funcs) {
if (funcs->atomic_enable)
@ -1353,7 +1362,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(encoder);
}
drm_atomic_bridge_enable(encoder->bridge, old_state);
drm_atomic_bridge_chain_enable(encoder->bridge, old_state);
}
drm_atomic_helper_commit_writebacks(dev, old_state);
@ -2202,7 +2211,9 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* when a job is queued, and any change to the pipeline that does not touch the
* connector is leading to timeouts when calling
* drm_atomic_helper_wait_for_vblanks() or
* drm_atomic_helper_wait_for_flip_done().
* drm_atomic_helper_wait_for_flip_done(). In addition to writeback
* connectors, this function can also fake VBLANK events for CRTCs without
* VBLANK interrupt.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.


@ -172,8 +172,8 @@ void drm_bridge_detach(struct drm_bridge *bridge)
*/
/**
* drm_bridge_mode_fixup - fixup proposed mode for all bridges in the
* encoder chain
* drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
@ -186,9 +186,9 @@ void drm_bridge_detach(struct drm_bridge *bridge)
* RETURNS:
* true on success, false on failure
*/
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
bool ret = true;
@ -198,15 +198,16 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
if (bridge->funcs->mode_fixup)
ret = bridge->funcs->mode_fixup(bridge, mode, adjusted_mode);
ret = ret && drm_bridge_mode_fixup(bridge->next, mode, adjusted_mode);
ret = ret && drm_bridge_chain_mode_fixup(bridge->next, mode,
adjusted_mode);
return ret;
}
EXPORT_SYMBOL(drm_bridge_mode_fixup);
EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
/**
* drm_bridge_mode_valid - validate the mode against all bridges in the
* encoder chain.
* drm_bridge_chain_mode_valid - validate the mode against all bridges in the
* encoder chain.
* @bridge: bridge control structure
* @mode: desired mode to be validated
*
@ -219,8 +220,9 @@ EXPORT_SYMBOL(drm_bridge_mode_fixup);
* RETURNS:
* MODE_OK on success, drm_mode_status Enum error code on failure
*/
enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
enum drm_mode_status ret = MODE_OK;
@ -233,12 +235,12 @@ enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
if (ret != MODE_OK)
return ret;
return drm_bridge_mode_valid(bridge->next, mode);
return drm_bridge_chain_mode_valid(bridge->next, mode);
}
EXPORT_SYMBOL(drm_bridge_mode_valid);
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
* drm_bridge_disable - disables all bridges in the encoder chain
* drm_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
@ -247,20 +249,21 @@ EXPORT_SYMBOL(drm_bridge_mode_valid);
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_disable(struct drm_bridge *bridge)
void drm_bridge_chain_disable(struct drm_bridge *bridge)
{
if (!bridge)
return;
drm_bridge_disable(bridge->next);
drm_bridge_chain_disable(bridge->next);
if (bridge->funcs->disable)
bridge->funcs->disable(bridge);
}
EXPORT_SYMBOL(drm_bridge_disable);
EXPORT_SYMBOL(drm_bridge_chain_disable);
/**
* drm_bridge_post_disable - cleans up after disabling all bridges in the encoder chain
* drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
* encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.post_disable op for all the bridges in the
@ -269,7 +272,7 @@ EXPORT_SYMBOL(drm_bridge_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_post_disable(struct drm_bridge *bridge)
void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
{
if (!bridge)
return;
@ -277,25 +280,25 @@ void drm_bridge_post_disable(struct drm_bridge *bridge)
if (bridge->funcs->post_disable)
bridge->funcs->post_disable(bridge);
drm_bridge_post_disable(bridge->next);
drm_bridge_chain_post_disable(bridge->next);
}
EXPORT_SYMBOL(drm_bridge_post_disable);
EXPORT_SYMBOL(drm_bridge_chain_post_disable);
/**
* drm_bridge_mode_set - set proposed mode for all bridges in the
* encoder chain
* drm_bridge_chain_mode_set - set proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
* @mode: desired mode to be set for the encoder chain
* @adjusted_mode: updated mode that works for this encoder chain
*
* Calls &drm_bridge_funcs.mode_set op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
if (!bridge)
return;
@ -303,13 +306,13 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
if (bridge->funcs->mode_set)
bridge->funcs->mode_set(bridge, mode, adjusted_mode);
drm_bridge_mode_set(bridge->next, mode, adjusted_mode);
drm_bridge_chain_mode_set(bridge->next, mode, adjusted_mode);
}
EXPORT_SYMBOL(drm_bridge_mode_set);
EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
* drm_bridge_pre_enable - prepares for enabling all
* bridges in the encoder chain
* drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
* encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
@ -318,20 +321,20 @@ EXPORT_SYMBOL(drm_bridge_mode_set);
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_pre_enable(struct drm_bridge *bridge)
void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
{
if (!bridge)
return;
drm_bridge_pre_enable(bridge->next);
drm_bridge_chain_pre_enable(bridge->next);
if (bridge->funcs->pre_enable)
bridge->funcs->pre_enable(bridge);
}
EXPORT_SYMBOL(drm_bridge_pre_enable);
EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
/**
* drm_bridge_enable - enables all bridges in the encoder chain
* drm_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
@ -340,7 +343,7 @@ EXPORT_SYMBOL(drm_bridge_pre_enable);
*
* Note that the bridge passed should be the one closest to the encoder
*/
void drm_bridge_enable(struct drm_bridge *bridge)
void drm_bridge_chain_enable(struct drm_bridge *bridge)
{
if (!bridge)
return;
@@ -348,12 +351,12 @@ void drm_bridge_enable(struct drm_bridge *bridge)
 	if (bridge->funcs->enable)
 		bridge->funcs->enable(bridge);
 
-	drm_bridge_enable(bridge->next);
+	drm_bridge_chain_enable(bridge->next);
 }
-EXPORT_SYMBOL(drm_bridge_enable);
+EXPORT_SYMBOL(drm_bridge_chain_enable);
 
 /**
- * drm_atomic_bridge_disable - disables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
  * @bridge: bridge control structure
  * @state: atomic state being committed
  *
@@ -364,24 +367,24 @@ EXPORT_SYMBOL(drm_bridge_enable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
-			       struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+				     struct drm_atomic_state *state)
 {
 	if (!bridge)
 		return;
 
-	drm_atomic_bridge_disable(bridge->next, state);
+	drm_atomic_bridge_chain_disable(bridge->next, state);
 
 	if (bridge->funcs->atomic_disable)
 		bridge->funcs->atomic_disable(bridge, state);
 	else if (bridge->funcs->disable)
 		bridge->funcs->disable(bridge);
 }
-EXPORT_SYMBOL(drm_atomic_bridge_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
 
 /**
- * drm_atomic_bridge_post_disable - cleans up after disabling all bridges in the
- *				    encoder chain
+ * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
+ *					  in the encoder chain
  * @bridge: bridge control structure
  * @state: atomic state being committed
  *
@@ -392,8 +395,8 @@ EXPORT_SYMBOL(drm_atomic_bridge_disable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
-				    struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+					  struct drm_atomic_state *state)
 {
 	if (!bridge)
 		return;
@@ -403,13 +406,13 @@ void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
 	else if (bridge->funcs->post_disable)
 		bridge->funcs->post_disable(bridge);
 
-	drm_atomic_bridge_post_disable(bridge->next, state);
+	drm_atomic_bridge_chain_post_disable(bridge->next, state);
 }
-EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
 
 /**
- * drm_atomic_bridge_pre_enable - prepares for enabling all bridges in the
- *				  encoder chain
+ * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
+ *					the encoder chain
  * @bridge: bridge control structure
  * @state: atomic state being committed
  *
@@ -420,23 +423,23 @@ EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
-				  struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+					struct drm_atomic_state *state)
 {
 	if (!bridge)
 		return;
 
-	drm_atomic_bridge_pre_enable(bridge->next, state);
+	drm_atomic_bridge_chain_pre_enable(bridge->next, state);
 
 	if (bridge->funcs->atomic_pre_enable)
 		bridge->funcs->atomic_pre_enable(bridge, state);
 	else if (bridge->funcs->pre_enable)
 		bridge->funcs->pre_enable(bridge);
 }
-EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
 
 /**
- * drm_atomic_bridge_enable - enables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
  * @bridge: bridge control structure
  * @state: atomic state being committed
  *
@@ -447,8 +450,8 @@ EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
-			      struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
+				    struct drm_atomic_state *state)
 {
 	if (!bridge)
 		return;
@@ -458,9 +461,9 @@ void drm_atomic_bridge_enable(struct drm_bridge *bridge,
 	else if (bridge->funcs->enable)
 		bridge->funcs->enable(bridge);
 
-	drm_atomic_bridge_enable(bridge->next, state);
+	drm_atomic_bridge_chain_enable(bridge->next, state);
 }
-EXPORT_SYMBOL(drm_atomic_bridge_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
 
 #ifdef CONFIG_OF
 /**
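
The rename above is purely mechanical, but it preserves a deliberate ordering: the disable/post_disable helpers act on each bridge before recursing toward the display, while the pre_enable helpers recurse before touching the current bridge, so power-up runs from the panel end back toward the encoder and power-down runs the opposite way. A minimal userspace sketch of that traversal pattern (a simplified model for illustration, not the kernel API):

#include <stdio.h>

/* Toy stand-in for struct drm_bridge: a singly linked chain rooted
 * at the bridge closest to the encoder. */
struct bridge {
	const char *name;
	struct bridge *next;
};

/* Like drm_bridge_chain_pre_enable(): recurse first, so the bridge
 * furthest from the encoder is prepared first. */
static void chain_pre_enable(struct bridge *b)
{
	if (!b)
		return;
	chain_pre_enable(b->next);
	printf("pre_enable %s\n", b->name);
}

/* Like drm_bridge_chain_enable(): act first, then recurse, so
 * enabling proceeds from the encoder toward the display. */
static void chain_enable(struct bridge *b)
{
	if (!b)
		return;
	printf("enable %s\n", b->name);
	chain_enable(b->next);
}

int main(void)
{
	struct bridge panel = { "panel-bridge", NULL };
	struct bridge lvds = { "lvds-encoder", &panel };

	chain_pre_enable(&lvds);	/* prints panel-bridge, then lvds-encoder */
	chain_enable(&lvds);		/* prints lvds-encoder, then panel-bridge */
	return 0;
}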

--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c

@@ -170,7 +170,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 		struct drm_bridge *next;
 
 		while (bridge) {
-			next = bridge->next;
+			next = drm_bridge_get_next_bridge(bridge);
 			drm_bridge_detach(bridge);
 			bridge = next;
 		}
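
Switching from the raw bridge->next pointer to the drm_bridge_get_next_bridge() accessor changes nothing about the loop's shape, and that shape is the point: the successor is fetched before drm_bridge_detach() runs, because detaching may unlink the current bridge. The same idiom, sketched generically (hypothetical node type, not the DRM structures):

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Tear down a singly linked list safely: read the successor before
 * the current element is destroyed, just as drm_encoder_cleanup()
 * saves the next bridge before detaching the current one. */
static void destroy_all(struct node *n)
{
	while (n) {
		struct node *next = n->next;	/* save successor first */

		free(n);			/* 'n' is now invalid */
		n = next;
	}
}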

--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c

@@ -178,6 +178,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
 		{ .format = DRM_FORMAT_BGRA5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
 		{ .format = DRM_FORMAT_RGB565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_BGR565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+		{ .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
 		{ .format = DRM_FORMAT_RGB888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_BGR888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_XRGB8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
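
These two entries work because DRM_FORMAT_BIG_ENDIAN is not a separate field but a flag bit OR'd into the fourcc code itself, which is why big-endian variants get their own rows in the format table. A small standalone sketch using the uapi definitions from drm_fourcc.h:

#include <stdint.h>
#include <stdio.h>

/* Definitions as in include/uapi/drm/drm_fourcc.h */
#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
#define DRM_FORMAT_BIG_ENDIAN	(1U << 31)
#define DRM_FORMAT_RGB565	fourcc_code('R', 'G', '1', '6')

int main(void)
{
	uint32_t fmt = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN;

	/* Strip the flag to recover the base fourcc; test it to decide
	 * the byte order of the pixel data. */
	printf("base fourcc 0x%08x, big-endian: %s\n",
	       fmt & ~DRM_FORMAT_BIG_ENDIAN,
	       (fmt & DRM_FORMAT_BIG_ENDIAN) ? "yes" : "no");
	return 0;
}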

--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c

@@ -1092,6 +1092,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
 }
 EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
 
+/**
+ * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
+ *    of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+					     u16 brightness)
+{
+	u8 payload[2] = { brightness >> 8, brightness & 0xff };
+	ssize_t err;
+
+	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+				 payload, sizeof(payload));
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
+ *    brightness value of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+					     u16 *brightness)
+{
+	u8 brightness_be[2];
+	ssize_t err;
+
+	err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+				brightness_be, sizeof(brightness_be));
+	if (err <= 0) {
+		if (err == 0)
+			err = -ENODATA;
+
+		return err;
+	}
+
+	*brightness = (brightness_be[0] << 8) | brightness_be[1];
+
+	return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
+
 static int mipi_dsi_drv_probe(struct device *dev)
 {
 	struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
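
The _large variants exist because MIPI_DCS_SET_DISPLAY_BRIGHTNESS carries a 16-bit big-endian parameter on panels that support the extended range, which the existing 8-bit helpers cannot express. A hedged sketch of how a panel driver might call the new setter from its backlight path (the function name and wiring here are illustrative, not part of this patch):

/* Illustrative only: backlight update_status callback for a
 * hypothetical DSI panel with a 16-bit DCS brightness register. */
static int example_panel_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness = bl->props.brightness;

	/* The helper emits the value in big-endian byte order, as the
	 * payload[] construction above shows. */
	return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
}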

--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c

@@ -278,6 +278,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
 		},
 		.driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* Lenovo IdeaPad Duet 3 10IGL5 */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+		},
+		.driver_data = (void *)&lcd1200x1920_rightside_up,
 	}, {	/* Lenovo Yoga Book X90F / X91F / X91L */
 		.matches = {
 		  /* Non exact match to match all versions */
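
Each quirk is a plain dmi_system_id entry, so adding a device is just a matter of matching its DMI strings and pointing driver_data at the right orientation record; lookup is then a first-match scan of the table. A simplified sketch of that step (the real drm_get_panel_orientation_quirk() additionally checks panel width/height for some entries):

/* Simplified: how the table above is consulted at probe time. */
static int example_get_orientation(void)
{
	const struct dmi_system_id *match = dmi_first_match(orientation_data);
	const struct drm_dmi_panel_orientation_data *data;

	if (!match)
		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;

	data = match->driver_data;
	return data->orientation;
}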

--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c

@@ -112,7 +112,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
 			continue;
 		}
 
-		ret = drm_bridge_mode_valid(encoder->bridge, mode);
+		ret = drm_bridge_chain_mode_valid(encoder->bridge, mode);
 		if (ret != MODE_OK) {
 			/* There is also no point in continuing for crtc check
 			 * here. */
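
drm_bridge_chain_mode_valid() asks each bridge in the chain to validate the mode and stops at the first rejection, so only a chain that unanimously returns MODE_OK lets validation proceed to the CRTC check. A simplified model of the helper under the 5.4-era bridge->next layout (a sketch, not copied from this patch):

/* Simplified model of drm_bridge_chain_mode_valid(): the first
 * bridge to reject the mode decides; otherwise MODE_OK. */
static enum drm_mode_status
example_chain_mode_valid(struct drm_bridge *bridge,
			 const struct drm_display_mode *mode)
{
	for (; bridge; bridge = bridge->next) {
		enum drm_mode_status ret;

		if (!bridge->funcs->mode_valid)
			continue;

		ret = bridge->funcs->mode_valid(bridge, mode);
		if (ret != MODE_OK)
			return ret;
	}

	return MODE_OK;
}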

[diff truncated: additional files changed by this merge are not shown]