f4b76e8165
https://source.android.com/docs/security/bulletin/2023-04-01
CVE-2022-4696
CVE-2023-20941

* tag 'ASB-2023-04-05_11-5.4' of https://android.googlesource.com/kernel/common:
  UPSTREAM: ext4: fix kernel BUG in 'ext4_write_inline_data_end()' UPSTREAM: hid: bigben_probe(): validate report count UPSTREAM: HID: bigben: use spinlock to safely schedule workers BACKPORT: of: base: Skip CPU nodes with "fail"/"fail-..." status UPSTREAM: HID: bigben_worker() remove unneeded check on report_field UPSTREAM: HID: bigben: use spinlock to protect concurrent accesses UPSTREAM: hwrng: virtio - add an internal buffer UPSTREAM: ext4: fix another off-by-one fsmap error on 1k block filesystems UPSTREAM: ext4: refuse to create ea block when umounted UPSTREAM: ext4: optimize ea_inode block expansion UPSTREAM: ext4: allocate extended attribute value in vmalloc area BACKPORT: FROMGIT: cgroup: Use separate src/dst nodes when preloading css_sets for migration Revert "iommu: Add gfp parameter to iommu_ops::map" Revert "iommu/amd: Pass gfp flags to iommu_map_page() in amd_iommu_map()" Revert "RDMA/usnic: use iommu_map_atomic() under spin_lock()" Linux 5.4.233 bpf: add missing header file include Revert "net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo child qdiscs" ext4: Fix function prototype mismatch for ext4_feat_ktype wifi: mwifiex: Add missing compatible string for SD8787 uaccess: Add speculation barrier to copy_from_user() mac80211: mesh: embedd mesh_paths and mpp_paths into ieee80211_if_mesh drm/i915/gvt: fix double free bug in split_2MB_gtt_entry alarmtimer: Prevent starvation by small intervals and SIG_IGN powerpc: dts: t208x: Disable 10G on MAC1 and MAC2 can: kvaser_usb: hydra: help gcc-13 to figure out cmd_len KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS KVM: x86: Fail emulation during EMULTYPE_SKIP on any exception random: always mix cycle counter in add_latent_entropy() powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G wifi: rtl8xxxu: gen2: Turn on the rate control drm/etnaviv: don't truncate physical page address drm: etnaviv: fix common struct sg_table related issues scatterlist: add generic wrappers for iterating over sgtable objects dma-mapping: add generic helpers for mapping sgtable objects Linux 5.4.232 iommu/amd: Pass gfp flags to iommu_map_page() in amd_iommu_map() net: sched: sch: Fix off by one in htb_activate_prios() ASoC: SOF: Intel: hda-dai: fix possible stream_tag leak nilfs2: fix underflow in second superblock position calculations kvm: initialize all of the kvm_debugregs structure before sending it to userspace i40e: Add checking for null for nlmsg_find_attr() ipv6: Fix tcp socket connection with DSCP. ipv6: Fix datagram socket connection with DSCP. ixgbe: add double of VLAN header when computing the max MTU net: mpls: fix stale pointer if allocation fails during device rename net: stmmac: Restrict warning on disabling DMA store and fwd mode bnxt_en: Fix mqprio and XDP ring checking logic net: stmmac: fix order of dwmac5 FlexPPS parametrization sequence net/usb: kalmia: Don't pass act_len in usb_bulk_msg error path dccp/tcp: Avoid negative sk_forward_alloc by ipv6_pinfo.pktoptions.
sctp: sctp_sock_filter(): avoid list_entry() on possibly empty list net: bgmac: fix BCM5358 support by setting correct flags i40e: add double of VLAN header when computing the max MTU ixgbe: allow to increase MTU to 3K with XDP enabled revert "squashfs: harden sanity check in squashfs_read_xattr_id_table" net: Fix unwanted sign extension in netdev_stats_to_stats64() Revert "mm: Always release pages to the buddy allocator in memblock_free_late()." hugetlb: check for undefined shift on 32 bit architectures sched/psi: Fix use-after-free in ep_remove_wait_queue() ALSA: hda/realtek - fixed wrong gpio assigned ALSA: hda/conexant: add a new hda codec SN6180 mmc: mmc_spi: fix error handling in mmc_spi_probe() mmc: sdio: fix possible resource leaks in some error paths ipv4: Fix incorrect route flushing when source address is deleted Revert "ipv4: Fix incorrect route flushing when source address is deleted" xfs: sync lazy sb accounting on quiesce of read-only mounts xfs: prevent UAF in xfs_log_item_in_current_chkpt xfs: fix the forward progress assertion in xfs_iwalk_run_callbacks xfs: ensure inobt record walks always make forward progress xfs: fix missing CoW blocks writeback conversion retry xfs: only relog deferred intent items if free space in the log gets low xfs: expose the log push threshold xfs: periodically relog deferred intent items xfs: change the order in which child and parent defer ops are finished xfs: fix an incore inode UAF in xfs_bui_recover xfs: clean up xfs_bui_item_recover iget/trans_alloc/ilock ordering xfs: clean up bmap intent item recovery checking xfs: xfs_defer_capture should absorb remaining transaction reservation xfs: xfs_defer_capture should absorb remaining block reservations xfs: proper replay of deferred ops queued during log recovery xfs: fix finobt btree block recovery ordering xfs: log new intent items created as part of finishing recovered intent items xfs: refactor xfs_defer_finish_noroll xfs: turn dfp_intent into a xfs_log_item xfs: merge the ->diff_items defer op into ->create_intent xfs: merge the ->log_item defer op into ->create_intent xfs: factor out a xfs_defer_create_intent helper xfs: remove the xfs_inode_log_item_t typedef xfs: remove the xfs_efd_log_item_t typedef xfs: remove the xfs_efi_log_item_t typedef netfilter: nft_tproxy: restrict to prerouting hook btrfs: free device in btrfs_close_devices for a single device filesystem aio: fix mremap after fork null-deref nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association s390/decompressor: specify __decompress() buf len to avoid overflow net: sched: sch: Bounds check priority net: stmmac: do not stop RX_CLK in Rx LPI state for qcs404 SoC net/rose: Fix to not accept on connected socket tools/virtio: fix the vringh test for virtio ring changes ASoC: cs42l56: fix DT probe selftests/bpf: Verify copy_register_state() preserves parent/live fields migrate: hugetlb: check for hugetlb shared PMD in node migration bpf: Always return target ifindex in bpf_fib_lookup nvme-pci: Move enumeration by class to be last in the table arm64: dts: meson-axg: Make mmc host controller interrupts level-sensitive arm64: dts: meson-g12-common: Make mmc host controller interrupts level-sensitive arm64: dts: meson-gx: Make mmc host controller interrupts level-sensitive riscv: Fixup race condition on PG_dcache_clean in flush_icache_pte ceph: flush cap releases when the session is flushed usb: typec: altmodes/displayport: Fix probe pin assign check usb: core: add quirk for Alcor Link AK9563 smartcard reader net: USB: 
Fix wrong-direction WARNING in plusb.c pinctrl: intel: Restore the pins that used to be in Direct IRQ mode pinctrl: single: fix potential NULL dereference pinctrl: aspeed: Fix confusing types in return value ALSA: pci: lx6464es: fix a debug loop selftests: forwarding: lib: quote the sysctl values rds: rds_rm_zerocopy_callback() use list_first_entry() ice: Do not use WQ_MEM_RECLAIM flag for workqueue ionic: clean interrupt before enabling queue to avoid credit race net: phy: meson-gxl: use MMD access dummy stubs for GXL, internal PHY bonding: fix error checking in bond_debug_reregister() xfrm: fix bug with DSCP copy to v6 from v4 tunnel RDMA/usnic: use iommu_map_atomic() under spin_lock() iommu: Add gfp parameter to iommu_ops::map IB/IPoIB: Fix legacy IPoIB due to wrong number of queues IB/hfi1: Restore allocated resources on failed copyout can: j1939: do not wait 250 ms if the same addr was already claimed tracing: Fix poll() and select() do not work on per_cpu trace_pipe and trace_pipe_raw ALSA: emux: Avoid potential array out-of-bound in snd_emux_xg_control() btrfs: zlib: zero-initialize zlib workspace btrfs: limit device extents to the device size iio:adc:twl6030: Enable measurement of VAC wifi: brcmfmac: Check the count value of channel spec to prevent out-of-bounds reads f2fs: fix to do sanity check on i_extra_isize in is_alive() fbdev: smscufx: fix error handling code in ufx_usb_probe powerpc/imc-pmu: Revert nest_init_lock to being a mutex serial: 8250_dma: Fix DMA Rx rearm race serial: 8250_dma: Fix DMA Rx completion race xprtrdma: Fix regbuf data not freed in rpcrdma_req_create() mm: swap: properly update readahead statistics in unuse_pte_range() nvmem: core: fix cell removal on error Squashfs: fix handling and sanity checking of xattr_ids count mm/swapfile: add cond_resched() in get_swap_pages() fpga: stratix10-soc: Fix return value check in s10_ops_write_init() mm: hugetlb: proc: check for hugetlb shared PMD in /proc/PID/smaps riscv: disable generation of unwind tables parisc: Wire up PTRACE_GETREGS/PTRACE_SETREGS for compat case parisc: Fix return code of pdc_iodc_print() iio:adc:twl6030: Enable measurements of VUSB, VBAT and others iio: adc: berlin2-adc: Add missing of_node_put() in error path iio: hid: fix the retval in accel_3d_capture_sample efi: Accept version 2 of memory attributes table watchdog: diag288_wdt: fix __diag288() inline assembly watchdog: diag288_wdt: do not use stack buffers for hardware data fbcon: Check font dimension limits Input: i8042 - add Clevo PCX0DX to i8042 quirk table Input: i8042 - add TUXEDO devices to i8042 quirk tables Input: i8042 - merge quirk tables Input: i8042 - move __initconst to fix code styling warning vc_screen: move load of struct vc_data pointer in vcs_read() to avoid UAF usb: gadget: f_fs: Fix unbalanced spinlock in __ffs_ep0_queue_wait usb: dwc3: qcom: enable vbus override when in OTG dr-mode usb: dwc3: dwc3-qcom: Fix typo in the dwc3 vbus override API iio: adc: stm32-dfsdm: fill module aliases net/x25: Fix to not accept on connected socket i2c: rk3x: fix a bunch of kernel-doc warnings scsi: iscsi_tcp: Fix UAF during login when accessing the shost ipaddress scsi: target: core: Fix warning on RT kernels efi: fix potential NULL deref in efi_mem_reserve_persistent net: openvswitch: fix flow memory leak in ovs_flow_cmd_new virtio-net: Keep stop() to follow mirror sequence of open() selftests: net: udpgso_bench_tx: Cater for pending datagrams zerocopy benchmarking selftests: net: udpgso_bench: Fix racing bug between the rx/tx programs 
selftests: net: udpgso_bench_rx/tx: Stop when wrong CLI args are provided selftests: net: udpgso_bench_rx: Fix 'used uninitialized' compiler warning ata: libata: Fix sata_down_spd_limit() when no link speed is reported can: j1939: fix errant WARN_ON_ONCE in j1939_session_deactivate net: phy: meson-gxl: Add generic dummy stubs for MMD register access squashfs: harden sanity check in squashfs_read_xattr_id_table netfilter: br_netfilter: disable sabotage_in hook after first suppression netrom: Fix use-after-free caused by accept on already connected socket fix "direction" argument of iov_iter_kvec() fix iov_iter_bvec() "direction" argument WRITE is "data source", not destination... scsi: Revert "scsi: core: map PQ=1, PDT=other values to SCSI_SCAN_TARGET_PRESENT" arm64: dts: imx8mm: Fix pad control for UART1_DTE_RX ALSA: hda/via: Avoid potential array out-of-bound in add_secret_dac_path() ASoC: Intel: bytcr_rt5651: Drop reference count of ACPI device after use bus: sunxi-rsb: Fix error handling in sunxi_rsb_init() firewire: fix memory leak for payload of request subaction to IEC 61883-1 FCP region Linux 5.4.231 Revert "xprtrdma: Fix regbuf data not freed in rpcrdma_req_create()" usb: host: xhci-plat: add wakeup entry at sysfs Bluetooth: fix null ptr deref on hci_sync_conn_complete_evt ipv6: ensure sane device mtu in tunnels exit: Use READ_ONCE() for all oops/warn limit reads docs: Fix path paste-o for /sys/kernel/warn_count panic: Expose "warn_count" to sysfs panic: Introduce warn_limit panic: Consolidate open-coded panic_on_warn checks exit: Allow oops_limit to be disabled exit: Expose "oops_count" to sysfs exit: Put an upper limit on how often we can oops ia64: make IA64_MCA_RECOVERY bool instead of tristate csky: Fix function name in csky_alignment() and die() h8300: Fix build errors from do_exit() to make_task_dead() transition hexagon: Fix function name in die() objtool: Add a missing comma to avoid string concatenation exit: Add and use make_task_dead. mm: kasan: do not panic if both panic_on_warn and kasan_multishot set panic: unset panic_on_warn inside panic() sysctl: add a new register_sysctl_init() interface dmaengine: imx-sdma: Fix a possible memory leak in sdma_transfer_init blk-cgroup: fix missing pd_online_fn() while activating policy bpf: Skip task with pid=1 in send_signal_common() ARM: dts: imx: Fix pca9547 i2c-mux node name x86/asm: Fix an assembler warning with current binutils clk: Fix pointer casting to prevent oops in devm_clk_release() perf/x86/amd: fix potential integer overflow on shift of a int netfilter: conntrack: unify established states for SCTP paths x86/i8259: Mark legacy PIC interrupts with IRQ_LEVEL block: fix and cleanup bio_check_ro nfsd: Ensure knfsd shuts down when the "nfsd" pseudofs is unmounted Revert "Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode" net: mdio-mux-meson-g12a: force internal PHY off on mux switch net: xgene: Move shared header file into include/linux net/phy/mdio-i2c: Move header file to include/linux/mdio net/tg3: resolve deadlock in tg3_reset_task() during EEH thermal: intel: int340x: Add locking to int340x_thermal_get_trip_type() net: ravb: Fix possible hang if RIS2_QFF1 happen sctp: fail if no bound addresses can be used for a given scope net/sched: sch_taprio: do not schedule in taprio_reset() netrom: Fix use-after-free of a listening socket. 
netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE ipv4: prevent potential spectre v1 gadget in fib_metrics_match() ipv4: prevent potential spectre v1 gadget in ip_metrics_convert() netlink: annotate data races around sk_state netlink: annotate data races around dst_portid and dst_group netlink: annotate data races around nlk->portid netfilter: nft_set_rbtree: skip elements in transaction from garbage collection net: fix UaF in netns ops registration error path netlink: prevent potential spectre v1 gadgets EDAC/qcom: Do not pass llcc_driv_data as edac_device_ctl_info's pvt_info EDAC/device: Respect any driver-supplied workqueue polling value ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment thermal: intel: int340x: Protect trip temperature from concurrent updates KVM: x86/vmx: Do not skip segment attributes if unusable bit is set cifs: Fix oops due to uncleared server->smbd_conn in reconnect ftrace/scripts: Update the instructions for ftrace-bisect.sh trace_events_hist: add check for return value of 'create_hist_field' tracing: Make sure trace_printk() can output as soon as it can be used module: Don't wait for GOING modules scsi: hpsa: Fix allocation size for scsi_host_alloc() Bluetooth: hci_sync: cancel cmd_timer if hci_open failed Revert "Revert "xhci: Set HCD flag to defer primary roothub registration"" fs: reiserfs: remove useless new_opts in reiserfs_remount netfilter: conntrack: do not renew entry stuck in tcp SYN_SENT state Revert "selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID" mmc: sdhci-esdhc-imx: correct the tuning start tap and step setting mmc: sdhci-esdhc-imx: disable the CMD CRC check for standard tuning mmc: sdhci-esdhc-imx: clear pending interrupt and halt cqhci lockref: stop doing cpu_relax in the cmpxchg loop platform/x86: asus-nb-wmi: Add alternate mapping for KEY_SCREENLOCK platform/x86: touchscreen_dmi: Add info for the CSL Panther Tab HD scsi: hisi_sas: Set a port invalid only if there are no devices attached when refreshing port id KVM: s390: interrupt: use READ_ONCE() before cmpxchg() spi: spidev: remove debug messages that access spidev->spi without locking ASoC: fsl-asoc-card: Fix naming of AC'97 CODEC widgets ASoC: fsl_ssi: Rename AC'97 streams to avoid collisions with AC'97 CODEC cpufreq: armada-37xx: stop using 0 as NULL pointer s390/debug: add _ASM_S390_ prefix to header guard drm: Add orientation quirk for Lenovo ideapad D330-10IGL ASoC: fsl_micfil: Correct the number of steps on SX controls cpufreq: Add Tegra234 to cpufreq-dt-platdev blocklist tcp: fix rate_app_limited to default to 1 net: dsa: microchip: ksz9477: port map correction in ALU table entry register driver core: Fix test_async_probe_init saves device in wrong array w1: fix WARNING after calling w1_process() w1: fix deadloop in __w1_remove_master_device() tcp: avoid the lookup process failing to get sk in ehash table dmaengine: xilinx_dma: call of_node_put() when breaking out of for_each_child_of_node() dmaengine: xilinx_dma: Fix devm_platform_ioremap_resource error handling dmaengine: xilinx_dma: use devm_platform_ioremap_resource() HID: betop: check shape of output reports net: macb: fix PTP TX timestamp failure due to packet padding dmaengine: Fix double increment of client_count in dma_chan_get() drm/panfrost: fix GENERIC_ATOMIC64 dependency net: mlx5: eliminate anonymous module_init & module_exit usb: gadget: f_fs: Ensure ep0req is dequeued before free_request usb: gadget: f_fs: Prevent race during ffs_ep0_queue_wait HID: revert 
CHERRY_MOUSE_000C quirk net: stmmac: fix invalid call to mdiobus_get_phy() HID: check empty report_list in bigben_probe() HID: check empty report_list in hid_validate_values() net: mdio: validate parameter addr in mdiobus_get_phy() net: usb: sr9700: Handle negative len l2tp: Don't sleep and disable BH under writer-side sk_callback_lock l2tp: Serialize access to sk_user_data with sk_callback_lock net: fix a concurrency bug in l2tp_tunnel_register() net/sched: sch_taprio: fix possible use-after-free wifi: rndis_wlan: Prevent buffer overflow in rndis_query_oid gpio: mxc: Always set GPIOs used as interrupt source to INPUT mode net: wan: Add checks for NULL for utdm in undo_uhdlc_init and unmap_si_regs net: nfc: Fix use-after-free in local_cleanup() phy: rockchip-inno-usb2: Fix missing clk_disable_unprepare() in rockchip_usb2phy_power_on() bpf: Fix pointer-leak due to insufficient speculative store bypass mitigation amd-xgbe: Delay AN timeout during KR training amd-xgbe: TX Flow Ctrl Registers are h/w ver dependent affs: initialize fsdata in affs_truncate() IB/hfi1: Fix expected receive setup error exit issues IB/hfi1: Reserve user expected TIDs IB/hfi1: Reject a zero-length user expected buffer RDMA/core: Fix ib block iterator counter overflow tomoyo: fix broken dependency on *.conf.default EDAC/highbank: Fix memory leak in highbank_mc_probe() HID: intel_ish-hid: Add check for ishtp_dma_tx_map ARM: imx: add missing of_node_put() ARM: imx35: Retrieve the IIM base address from devicetree ARM: imx31: Retrieve the IIM base address from devicetree ARM: imx27: Retrieve the SYSCTRL base address from devicetree ARM: dts: imx6qdl-gw560x: Remove incorrect 'uart-has-rtscts' memory: mvebu-devbus: Fix missing clk_disable_unprepare in mvebu_devbus_probe() memory: atmel-sdramc: Fix missing clk_disable_unprepare in atmel_ramc_probe() clk: Provide new devm_clk helpers for prepared and enabled clocks clk: generalize devm_clk_get() a bit Linux 5.4.230 mm/khugepaged: fix collapse_pte_mapped_thp() to allow anon_vma x86/fpu: Use _Alignof to avoid undefined behavior in TYPE_ALIGN drm/amd/display: Fix COLOR_SPACE_YCBCR2020_TYPE matrix drm/amd/display: Fix set scaling doesn's work drm/i915: re-disable RC6p on Sandy Bridge gsmi: fix null-deref in gsmi_get_variable serial: atmel: fix incorrect baudrate setup dmaengine: tegra210-adma: fix global intr clear serial: pch_uart: Pass correct sg to dma_unmap_sg() dt-bindings: phy: g12a-usb3-pcie-phy: fix compatible string documentation usb-storage: apply IGNORE_UAS only for HIKSEMI MD202 on RTL9210 usb: gadget: f_ncm: fix potential NULL ptr deref in ncm_bitrate() usb: gadget: g_webcam: Send color matching descriptor per frame usb: typec: altmodes/displayport: Fix pin assignment calculation usb: typec: altmodes/displayport: Add pin assignment helper usb: host: ehci-fsl: Fix module alias USB: serial: cp210x: add SCALANCE LPE-9000 device id USB: gadgetfs: Fix race between mounting and unmounting cifs: do not include page data when checking signature btrfs: fix race between quota rescan and disable leading to NULL pointer deref mmc: sunxi-mmc: Fix clock refcount imbalance during unbind comedi: adv_pci1760: Fix PWM instruction handling usb: core: hub: disable autosuspend for TI TUSB8041 misc: fastrpc: Fix use-after-free race condition for maps misc: fastrpc: Don't remove map on creater_process and device_release USB: misc: iowarrior: fix up header size for USB_DEVICE_ID_CODEMERCS_IOW100 USB: serial: option: add Quectel EM05CN modem USB: serial: option: add Quectel EM05CN (SG) modem 
USB: serial: option: add Quectel EC200U modem USB: serial: option: add Quectel EM05-G (RS) modem USB: serial: option: add Quectel EM05-G (CS) modem USB: serial: option: add Quectel EM05-G (GR) modem prlimit: do_prlimit needs to have a speculation check xhci: Detect lpm incapable xHC USB3 roothub ports from ACPI tables usb: acpi: add helper to check port lpm capability using acpi _DSM xhci: Add a flag to disable USB3 lpm on a xhci root port level. xhci: Add update_hub_device override for PCI xHCI hosts xhci: Fix null pointer dereference when host dies usb: xhci: Check endpoint is valid before dereferencing it xhci-pci: set the dma max_seg_size ALSA: hda/realtek - Turn on power early drm/i915/gt: Reset twice efi: fix userspace infinite retry read efivars after EFI runtime services page fault nilfs2: fix general protection fault in nilfs_btree_insert() Add exception protection processing for vd in axi_chan_handle_err function wifi: brcmfmac: fix regression for Broadcom PCIe wifi devices f2fs: let's avoid panic if extent_tree is not created RDMA/srp: Move large values to a new enum for gcc13 net/ethtool/ioctl: return -EOPNOTSUPP if we have no phy stats selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID pNFS/filelayout: Fix coalescing test for single DS Revert "net: add atomic_long_t to net_device_stats fields" Revert "PM/devfreq: governor: Add a private governor_data for governor" Linux 5.4.229 tipc: call tipc_lxc_xmit without holding node_read_lock ocfs2: fix freeing uninitialized resource on ocfs2_dlm_shutdown tipc: Add a missing case of TIPC_DIRECT_MSG type tty: serial: tegra: Handle RX transfer in PIO mode if DMA wasn't started tipc: fix use-after-free in tipc_disc_rcv() Revert "usb: ulpi: defer ulpi_register on ulpi_read_id timeout" mm: Always release pages to the buddy allocator in memblock_free_late(). efi: fix NULL-deref in init error path arm64: cmpxchg_double*: hazard against entire exchange variable arm64: atomics: remove LL/SC trampolines arm64: atomics: format whitespace consistently drm/virtio: Fix GEM handle creation UAF x86/resctrl: Fix task CLOSID/RMID update race x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe() iommu/mediatek-v1: Add error handle for mtk_iommu_probe net/mlx5: Fix ptp max frequency adjustment range net/mlx5: Rename ptp clock info net/sched: act_mpls: Fix warning during failed attribute validation nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame() hvc/xen: lock console list traversal tipc: fix unexpected link reset due to discovery messages tipc: eliminate checking netns if node established tipc: improve throughput between nodes in netns regulator: da9211: Use irq handler when ready EDAC/device: Fix period calculation in edac_device_reset_delay_period() x86/boot: Avoid using Intel mnemonics in AT&T syntax asm powerpc/imc-pmu: Fix use of mutex in IRQs disabled section netfilter: ipset: Fix overflow before widen in the bitmap_ip_create() function. 
ext4: fix uninititialized value in 'ext4_evict_inode' ext4: fix use-after-free in ext4_orphan_cleanup ext4: lost matching-pair of trace in ext4_truncate ext4: fix bug_on in __es_tree_search caused by bad quota inode quota: Factor out setup of quota inode jbd2: use the correct print format usb: ulpi: defer ulpi_register on ulpi_read_id timeout wifi: wilc1000: sdio: fix module autoloading ipv6: raw: Deduct extension header length in rawv6_push_pending_frames ixgbe: fix pci device refcount leak platform/x86: sony-laptop: Don't turn off 0x153 keyboard backlight during probe drm/msm/adreno: Make adreno quirks not overwrite each other cifs: Fix uninitialized memory read for smb311 posix symlink create ALSA: hda/hdmi: Add a HP device 0x8715 to force connect list ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF net/ulp: prevent ULP without clone op from entering the LISTEN status s390/percpu: add READ_ONCE() to arch_this_cpu_to_op_simple() s390/kexec: fix ipl report address for kdump perf auxtrace: Fix address filter duplicate symbol selection docs: Fix the docs build with Sphinx 6.0 efi: tpm: Avoid READ_ONCE() for accessing the event log KVM: arm64: Fix S1PTW handling on RO memslots net: sched: disallow noqueue for qdisc classes driver core: Fix bus_type.match() error handling in __driver_attach() selftests: set the BUILD variable to absolute path selftests: Fix kselftest O=objdir build from cluttering top level objdir parisc: Align parisc MADV_XXX constants with all other architectures mbcache: Avoid nesting of cache->c_list_lock under bit locks hfs/hfsplus: avoid WARN_ON() for sanity check, use proper error handling hfs/hfsplus: use WARN_ON for sanity check ext4: don't allow journal inode to have encrypt flag riscv: uaccess: fix type of 0 variable on error in get_user() nfsd: fix handling of readdir in v4root vs. 
mount upcall timeout x86/bugs: Flush IBP in ib_prctl_set() ASoC: Intel: bytcr_rt5640: Add quirk for the Advantech MICA-071 tablet udf: Fix extension of the last extent in the file caif: fix memory leak in cfctrl_linkup_request() drm/i915: unpin on error in intel_vgpu_shadow_mm_pin() usb: rndis_host: Secure rndis_query check against int overflow drivers/net/bonding/bond_3ad: return when there's no aggregator perf tools: Fix resources leak in perf_data__open_dir() net: sched: cbq: dont intepret cls results when asked to drop net: sched: atm: dont intepret cls results when asked to drop RDMA/mlx5: Fix validation of max_rd_atomic caps for DC RDMA/uverbs: Silence shiftTooManyBitsSigned warning net: phy: xgmiitorgmii: Fix refcount leak in xgmiitorgmii_probe net: amd-xgbe: add missed tasklet_kill vhost: fix range used in translate_desc() nfc: Fix potential resource leaks qlcnic: prevent ->dcb use-after-free on qlcnic_dcb_enable() failure net: sched: fix memory leak in tcindex_set_parms net: hns3: add interrupts re-initialization while doing VF FLR nfsd: shut down the NFSv4 state objects before the filecache bpf: pull before calling skb_postpull_rcsum() SUNRPC: ensure the matching upcall is in-flight upon downcall ext4: fix deadlock due to mbcache entry corruption mbcache: automatically delete entries from cache on freeing ext4: fix race when reusing xattr blocks ext4: unindent codeblock in ext4_xattr_block_set() ext4: remove EA inode entry from mbcache on inode eviction mbcache: add functions to delete entry if unused mbcache: don't reclaim used entries ext4: use kmemdup() to replace kmalloc + memcpy fs: ext4: initialize fsdata in pagecache_write() ext4: use memcpy_to_page() in pagecache_write() mm/highmem: Lift memcpy_[to|from]_page to core ext4: correct inconsistent error msg in nojournal mode ext4: goto right label 'failed_mount3a' ravb: Fix "failed to switch device to config mode" message during unbind KVM: nVMX: Properly expose ENABLE_USR_WAIT_PAUSE control to L1 KVM: VMX: Fix the spelling of CPU_BASED_USE_TSC_OFFSETTING KVM: VMX: Rename NMI_PENDING to NMI_WINDOW KVM: VMX: Rename INTERRUPT_PENDING to INTERRUPT_WINDOW KVM: retpolines: x86: eliminate retpoline from vmx.c exit handlers KVM: x86: optimize more exit handlers in vmx.c perf probe: Fix to get the DW_AT_decl_file and DW_AT_call_file as unsinged data perf probe: Use dwarf_attr_integrate as generic DWARF attr accessor dm thin: resume even if in FAIL mode media: s5p-mfc: Fix in register read and write for H264 media: s5p-mfc: Clear workbit to handle error condition media: s5p-mfc: Fix to handle reference queue during finishing PM/devfreq: governor: Add a private governor_data for governor btrfs: replace strncpy() with strscpy() ext4: allocate extended attribute value in vmalloc area ext4: avoid unaccounted block allocation when expanding inode ext4: initialize quota before expanding inode in setproject ioctl ext4: fix inode leak in ext4_xattr_inode_create() on an error path ext4: avoid BUG_ON when creating xattrs ext4: fix error code return to user-space in ext4_get_branch() ext4: fix corruption when online resizing a 1K bigalloc fs ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline ext4: init quota for 'old.inode' in 'ext4_rename' ext4: fix bug_on in __es_tree_search caused by bad boot loader inode ext4: fix reserved cluster accounting in __es_remove_extent() ext4: add helper to check quota inums ext4: add EXT4_IGET_BAD flag to prevent unexpected bad inode ext4: fix undefined behavior in bit shift for 
ext4_check_flag_values ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop drm/vmwgfx: Validate the box size for the snooped cursor drm/connector: send hotplug uevent on connector cleanup device_cgroup: Roll back to original exceptions after copy failure parisc: led: Fix potential null-ptr-deref in start_task() iommu/amd: Fix ivrs_acpihid cmdline parsing code crypto: n2 - add missing hash statesize PCI/sysfs: Fix double free in error path PCI: Fix pci_device_is_present() for VFs by checking PF ipmi: fix use after free in _ipmi_destroy_user() ima: Fix a potential NULL pointer access in ima_restore_measurement_list mtd: spi-nor: Check for zero erase size in spi_nor_find_best_erase_type() ipmi: fix long wait in unload when IPMI disconnect efi: Add iMac Pro 2017 to uefi skip cert quirk md/bitmap: Fix bitmap chunk size overflow issues cifs: fix missing display of three mount options cifs: fix confusing debug message media: dvb-core: Fix UAF due to refcount races at releasing media: dvb-core: Fix double free in dvb_register_device() ARM: 9256/1: NWFPE: avoid compiler-generated __aeabi_uldivmod tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line tracing/hist: Fix wrong return value in parse_action_params() x86/microcode/intel: Do not retry microcode reloading on the APs tracing/hist: Fix out-of-bound write on 'action_data.var_ref_idx' dm cache: set needs_check flag after aborting metadata dm cache: Fix UAF in destroy() dm clone: Fix UAF in clone_dtr() dm integrity: Fix UAF in dm_integrity_dtr() dm thin: Fix UAF in run_timer_softirq() dm thin: Use last transaction's pmd->root when commit failed dm thin: Fix ABBA deadlock between shrink_slab and dm_pool_abort_metadata dm cache: Fix ABBA deadlock between shrink_slab and dm_cache_metadata_abort binfmt: Fix error return code in load_elf_fdpic_binary() binfmt: Move install_exec_creds after setup_new_exec to match binfmt_elf cpufreq: Init completion before kobject_init_and_add() selftests: Use optional USERCFLAGS and USERLDFLAGS arm64: dts: qcom: sdm850-lenovo-yoga-c630: correct I2C12 pins drive strength ARM: ux500: do not directly dereference __iomem btrfs: fix resolving backrefs for inline extent followed by prealloc mmc: sdhci-sprd: Disable CLK_AUTO when the clock is less than 400K ktest.pl minconfig: Unset configs instead of just removing them kest.pl: Fix grub2 menu handling for rebooting soc: qcom: Select REMAP_MMIO for LLCC driver media: stv0288: use explicitly signed char net/af_packet: make sure to pull mac header net/af_packet: add VLAN support for AF_PACKET SOCK_RAW GSO SUNRPC: Don't leak netobj memory when gss_read_proxy_verf() fails tpm: tpm_tis: Add the missed acpi_put_table() to fix memory leak tpm: tpm_crb: Add the missed acpi_put_table() to fix memory leak mmc: vub300: fix warning - do not call blocking ops when !TASK_RUNNING f2fs: should put a page when checking the summary info mm, compaction: fix fast_isolate_around() to stay within boundaries md: fix a crash in mempool_free pnode: terminate at peers of source ALSA: line6: fix stack overflow in line6_midi_transmit ALSA: line6: correct midi status byte when receiving data from podxt ovl: Use ovl mounter's fsuid and fsgid in ovl_link() hfsplus: fix bug causing custom uid and gid being unable to be assigned with mount HID: plantronics: Additional PIDs for double volume key presses quirk HID: multitouch: fix Asus ExpertBook P2 P2451FA trackpoint powerpc/rtas: avoid scheduling in rtas_os_term() powerpc/rtas: avoid device tree 
lookups in rtas_os_term() objtool: Fix SEGFAULT nvme: fix the NVME_CMD_EFFECTS_CSE_MASK definition nvme: resync include/linux/nvme.h with nvmecli ata: ahci: Fix PCS quirk application for suspend nvme-pci: fix doorbell buffer value endianness cifs: fix oops during encryption media: dvbdev: fix refcnt bug media: dvbdev: fix build warning due to comments gcov: add support for checksum field regulator: core: fix deadlock on regulator enable iio: adc128s052: add proper .data members in adc128_of_match table iio: adc: ad_sigma_delta: do not use internal iio_dev lock reiserfs: Add missing calls to reiserfs_security_free() HID: wacom: Ensure bootloader PID is usable in hidraw mode usb: dwc3: core: defer probe on ulpi_read_id timeout ALSA: hda/hdmi: Add HP Device 0x8711 to force connect list ALSA: hda/realtek: Add quirk for Lenovo TianYi510Pro-14IOB pstore: Make sure CONFIG_PSTORE_PMSG selects CONFIG_RT_MUTEXES pstore: Switch pmsg_lock to an rt_mutex to avoid priority inversion ASoC: rt5670: Remove unbalanced pm_runtime_put() ASoC: rockchip: spdif: Add missing clk_disable_unprepare() in rk_spdif_runtime_resume() ASoC: wm8994: Fix potential deadlock ASoC: rockchip: pdm: Add missing clk_disable_unprepare() in rockchip_pdm_runtime_resume() ASoC: audio-graph-card: fix refcount leak of cpu_ep in __graph_for_each_link() ASoC: mediatek: mt8173-rt5650-rt5514: fix refcount leak in mt8173_rt5650_rt5514_dev_probe() ASoC: Intel: Skylake: Fix driver hang during shutdown ALSA: hda: add snd_hdac_stop_streams() helper ALSA/ASoC: hda: move/rename snd_hdac_ext_stop_streams to hdac_stream.c orangefs: Fix kmemleak in orangefs_{kernel,client}_debug_init() orangefs: Fix kmemleak in orangefs_prepare_debugfs_help_string() drm/sti: Fix return type of sti_{dvo,hda,hdmi}_connector_mode_valid() drm/fsl-dcu: Fix return type of fsl_dcu_drm_connector_mode_valid() hugetlbfs: fix null-ptr-deref in hugetlbfs_parse_param() clk: st: Fix memory leak in st_of_quadfs_setup() media: si470x: Fix use-after-free in si470x_int_in_callback() mmc: f-sdh30: Add quirks for broken timeout clock capability regulator: core: fix use_count leakage when handling boot-on blk-mq: fix possible memleak when register 'hctx' failed media: dvb-usb: fix memory leak in dvb_usb_adapter_init() media: dvbdev: adopts refcnt to avoid UAF media: dvb-frontends: fix leak of memory fw bpf: Prevent decl_tag from being referenced in func_proto arg ppp: associate skb with a device at tx mrp: introduce active flags to prevent UAF when applicant uninit net: add atomic_long_t to net_device_stats fields md/raid1: stop mdx_raid1 thread when raid1 array run failed drivers/md/md-bitmap: check the return value of md_bitmap_get_counter() drm/sti: Use drm_mode_copy() drm/rockchip: Use drm_mode_copy() s390/lcs: Fix return type of lcs_start_xmit() s390/netiucv: Fix return type of netiucv_tx() s390/ctcm: Fix return type of ctc{mp,}m_tx() igb: Do not free q_vector unless new one was allocated wifi: brcmfmac: Fix potential shift-out-of-bounds in brcmf_fw_alloc_request() hamradio: baycom_epp: Fix return type of baycom_send_packet() net: ethernet: ti: Fix return type of netcp_ndo_start_xmit() bpf: make sure skb->len != 0 when redirecting to a tunneling device ipmi: fix memleak when unload ipmi driver ASoC: codecs: rt298: Add quirk for KBL-R RVP platform wifi: ar5523: Fix use-after-free on ar5523_cmd() timed out wifi: ath9k: verify the expected usb_endpoints are present brcmfmac: return error when getting invalid max_flowrings from dongle drm/etnaviv: add missing quirks for GC300 hfs: fix 
OOB Read in __hfs_brec_find acct: fix potential integer overflow in encode_comp_t() nilfs2: fix shift-out-of-bounds/overflow in nilfs_sb2_bad_offset() ACPICA: Fix error code path in acpi_ds_call_control_method() fs: jfs: fix shift-out-of-bounds in dbDiscardAG udf: Avoid double brelse() in udf_rename() fs: jfs: fix shift-out-of-bounds in dbAllocAG binfmt_misc: fix shift-out-of-bounds in check_special_flags rcu: Fix __this_cpu_read() lockdep warning in rcu_force_quiescent_state() net: stream: purge sk_error_queue in sk_stream_kill_queues() myri10ge: Fix an error handling path in myri10ge_probe() rxrpc: Fix missing unlock in rxrpc_do_sendmsg() net_sched: reject TCF_EM_SIMPLE case for complex ematch module mailbox: zynq-ipi: fix error handling while device_register() fails skbuff: Account for tail adjustment during pull operations openvswitch: Fix flow lookup to use unmasked key rtc: mxc_v2: Add missing clk_disable_unprepare() r6040: Fix kmemleak in probe and remove nfc: pn533: Clear nfc_target before being used mISDN: hfcmulti: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave() mISDN: hfcpci: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave() mISDN: hfcsusb: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave() nfsd: under NFSv4.1, fix double svc_xprt_put on rpc_create failure NFSD: Add tracepoints to NFSD's duplicate reply cache nfsd: Define the file access mode enum for tracing rtc: pic32: Move devm_rtc_allocate_device earlier in pic32_rtc_probe() rtc: st-lpc: Add missing clk_disable_unprepare in st_rtc_probe() remoteproc: qcom_q6v5_pas: Fix missing of_node_put() in adsp_alloc_memory_region() remoteproc: sysmon: fix memory leak in qcom_add_sysmon_subdev() pwm: sifive: Call pwm_sifive_update_clock() while mutex is held selftests/powerpc: Fix resource leaks powerpc/hv-gpci: Fix hv_gpci event list powerpc/83xx/mpc832x_rdb: call platform_device_put() in error case in of_fsl_spi_probe() powerpc/perf: callchain validate kernel stack pointer bounds powerpc/xive: add missing iounmap() in error path in xive_spapr_populate_irq_data() cxl: Fix refcount leak in cxl_calc_capp_routing powerpc/52xx: Fix a resource leak in an error handling path macintosh/macio-adb: check the return value of ioremap() macintosh: fix possible memory leak in macio_add_one_device() iommu/fsl_pamu: Fix resource leak in fsl_pamu_probe() iommu/amd: Fix pci device refcount leak in ppr_notifier() rtc: pcf85063: Fix reading alarm rtc: snvs: Allow a time difference on clock register read include/uapi/linux/swab: Fix potentially missing __always_inline RDMA/siw: Fix pointer cast warning power: supply: fix null pointer dereferencing in power_supply_get_battery_info HSI: omap_ssi_core: Fix error handling in ssi_init() perf symbol: correction while adjusting symbol perf trace: Handle failure when trace point folder is missed perf trace: Use macro RAW_SYSCALL_ARGS_NUM to replace number perf trace: Add a strtoul() method to 'struct syscall_arg_fmt' perf trace: Allow associating scnprintf routines with well known arg names perf trace: Add the syscall_arg_fmt pointer to syscall_arg perf trace: Factor out the initialization of syscal_arg_fmt->scnprintf perf trace: Separate 'struct syscall_fmt' definition from syscall_fmts variable perf trace: Return error if a system call doesn't exist power: supply: fix residue sysfs file in error handle route of __power_supply_register() HSI: omap_ssi_core: fix possible memory leak in ssi_probe() HSI: omap_ssi_core: fix unbalanced pm_runtime_disable() fbdev: uvesafb: Fixes 
an error handling path in uvesafb_probe() fbdev: vermilion: decrease reference count in error path fbdev: via: Fix error in via_core_init() fbdev: pm2fb: fix missing pci_disable_device() fbdev: ssd1307fb: Drop optional dependency samples: vfio-mdev: Fix missing pci_disable_device() in mdpy_fb_probe() tracing/hist: Fix issue of losting command info in error_log usb: storage: Add check for kcalloc i2c: ismt: Fix an out-of-bounds bug in ismt_access() vme: Fix error not catched in fake_init() staging: rtl8192e: Fix potential use-after-free in rtllib_rx_Monitor() staging: rtl8192u: Fix use after free in ieee80211_rx() i2c: pxa-pci: fix missing pci_disable_device() on error in ce4100_i2c_probe chardev: fix error handling in cdev_device_add() mcb: mcb-parse: fix error handing in chameleon_parse_gdd() drivers: mcb: fix resource leak in mcb_probe() usb: gadget: f_hid: fix refcount leak on error path usb: gadget: f_hid: fix f_hidg lifetime vs cdev usb: gadget: f_hid: optional SETUP/SET_REPORT mode usb: roles: fix of node refcount leak in usb_role_switch_is_parent() counter: stm32-lptimer-cnt: fix the check on arr and cmp registers update cxl: fix possible null-ptr-deref in cxl_pci_init_afu|adapter() cxl: fix possible null-ptr-deref in cxl_guest_init_afu|adapter() misc: sgi-gru: fix use-after-free error in gru_set_context_option, gru_fault and gru_handle_user_call_os misc: tifm: fix possible memory leak in tifm_7xx1_switch_media() misc: ocxl: fix possible name leak in ocxl_file_register_afu() test_firmware: fix memory leak in test_firmware_init() serial: sunsab: Fix error handling in sunsab_init() serial: altera_uart: fix locking in polling mode tty: serial: altera_uart_{r,t}x_chars() need only uart_port tty: serial: clean up stop-tx part in altera_uart_tx_chars() serial: pch: Fix PCI device refcount leak in pch_request_dma() serial: pl011: Do not clear RX FIFO & RX interrupt in unthrottle. 
serial: amba-pl011: avoid SBSA UART accessing DMACR register usb: typec: tcpci: fix of node refcount leak in tcpci_register_port() usb: typec: Check for ops->exit instead of ops->enter in altmode_exit staging: vme_user: Fix possible UAF in tsi148_dma_list_add usb: fotg210-udc: Fix ages old endianness issues uio: uio_dmem_genirq: Fix deadlock between irq config and handling uio: uio_dmem_genirq: Fix missing unlock in irq configuration vfio: platform: Do not pass return buffer to ACPI _RST method class: fix possible memory leak in __class_register() serial: tegra: Read DMA status before terminating tty: serial: tegra: Activate RX DMA transfer by request drivers: dio: fix possible memory leak in dio_init() IB/IPoIB: Fix queue count inconsistency for PKEY child interfaces hwrng: geode - Fix PCI device refcount leak hwrng: amd - Fix PCI device refcount leak crypto: img-hash - Fix variable dereferenced before check 'hdev->req' orangefs: Fix sysfs not cleanup when dev init failed RDMA/hfi1: Fix error return code in parse_platform_config() crypto: omap-sham - Use pm_runtime_resume_and_get() in omap_sham_probe() f2fs: avoid victim selection from previous victim section RDMA/nldev: Add checks for nla_nest_start() in fill_stat_counter_qps() scsi: snic: Fix possible UAF in snic_tgt_create() scsi: fcoe: Fix transport not deattached when fcoe_if_init() fails scsi: ipr: Fix WARNING in ipr_init() scsi: fcoe: Fix possible name leak when device_register() fails scsi: hpsa: Fix possible memory leak in hpsa_add_sas_device() scsi: hpsa: Fix error handling in hpsa_add_sas_host() scsi: mpt3sas: Fix possible resource leaks in mpt3sas_transport_port_add() crypto: tcrypt - Fix multibuffer skcipher speed test mem leak scsi: hpsa: Fix possible memory leak in hpsa_init_one() RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when socket create failed crypto: ccree - Make cc_debugfs_global_fini() available for module init function RDMA/hfi: Decrease PCI device reference count in error path PCI: Check for alloc failure in pci_request_irq() crypto: ccree - Remove debugfs when platform_driver_register failed crypto: ccree - swap SHA384 and SHA512 larval hashes at build time scsi: scsi_debug: Fix a warning in resp_write_scat() RDMA/siw: Set defined status for work completion with undefined status RDMA/nldev: Return "-EAGAIN" if the cm_id isn't from expected port RDMA/siw: Fix immediate work request flush to completion queue f2fs: fix normal discard process RDMA/core: Fix order of nldev_exit call apparmor: Use pointer to struct aa_label for lbs_cred apparmor: Fix abi check to include v8 abi apparmor: fix lockdep warning when removing a namespace apparmor: fix a memleak in multi_transaction_new() stmmac: fix potential division by 0 Bluetooth: RFCOMM: don't call kfree_skb() under spin_lock_irqsave() Bluetooth: hci_core: don't call kfree_skb() under spin_lock_irqsave() Bluetooth: hci_bcsp: don't call kfree_skb() under spin_lock_irqsave() Bluetooth: hci_h5: don't call kfree_skb() under spin_lock_irqsave() Bluetooth: hci_ll: don't call kfree_skb() under spin_lock_irqsave() Bluetooth: hci_qca: don't call kfree_skb() under spin_lock_irqsave() Bluetooth: btusb: don't call kfree_skb() under spin_lock_irqsave() ntb_netdev: Use dev_kfree_skb_any() in interrupt context net: lan9303: Fix read error execution path can: tcan4x5x: Remove invalid write in clear_interrupts net: amd-xgbe: Check only the minimum speed for active/passive cables net: amd-xgbe: Fix logic around active and passive cables net: amd: lance: don't call 
dev_kfree_skb() under spin_lock_irqsave() hamradio: don't call dev_kfree_skb() under spin_lock_irqsave() net: ethernet: dnet: don't call dev_kfree_skb() under spin_lock_irqsave() net: emaclite: don't call dev_kfree_skb() under spin_lock_irqsave() net: apple: bmac: don't call dev_kfree_skb() under spin_lock_irqsave() net: apple: mace: don't call dev_kfree_skb() under spin_lock_irqsave() net/tunnel: wait until all sk_user_data reader finish before releasing the sock net: farsync: Fix kmemleak when rmmods farsync ethernet: s2io: don't call dev_kfree_skb() under spin_lock_irqsave() of: overlay: fix null pointer dereferencing in find_dup_cset_node_entry() and find_dup_cset_prop() drivers: net: qlcnic: Fix potential memory leak in qlcnic_sriov_init() net: stmmac: selftests: fix potential memleak in stmmac_test_arpoffload() net: defxx: Fix missing err handling in dfx_init() net: vmw_vsock: vmci: Check memcpy_from_msg() clk: socfpga: Fix memory leak in socfpga_gate_init() clk: socfpga: use clk_hw_register for a5/c5 clk: socfpga: clk-pll: Remove unused variable 'rc' blktrace: Fix output non-blktrace event when blk_classic option enabled wifi: brcmfmac: Fix error return code in brcmf_sdio_download_firmware() wifi: rtl8xxxu: Add __packed to struct rtl8723bu_c2h spi: spi-gpio: Don't set MOSI as an input if not 3WIRE mode clk: samsung: Fix memory leak in _samsung_clk_register_pll() media: coda: Add check for kmalloc media: coda: Add check for dcoda_iram_alloc media: c8sectpfe: Add of_node_put() when breaking out of loop mmc: mmci: fix return value check of mmc_add_host() mmc: wbsd: fix return value check of mmc_add_host() mmc: via-sdmmc: fix return value check of mmc_add_host() mmc: meson-gx: fix return value check of mmc_add_host() mmc: omap_hsmmc: fix return value check of mmc_add_host() mmc: atmel-mci: fix return value check of mmc_add_host() mmc: wmt-sdmmc: fix return value check of mmc_add_host() mmc: vub300: fix return value check of mmc_add_host() mmc: toshsd: fix return value check of mmc_add_host() mmc: rtsx_usb_sdmmc: fix return value check of mmc_add_host() mmc: pxamci: fix return value check of mmc_add_host() mmc: mxcmmc: fix return value check of mmc_add_host() mmc: moxart: fix return value check of mmc_add_host() mmc: alcor: fix return value check of mmc_add_host() NFSv4.x: Fail client initialisation if state manager thread can't run SUNRPC: Fix missing release socket in rpc_sockname() xprtrdma: Fix regbuf data not freed in rpcrdma_req_create() ALSA: mts64: fix possible null-ptr-defer in snd_mts64_interrupt media: saa7164: fix missing pci_disable_device() bpf, sockmap: fix race in sock_map_free() regulator: core: fix resource leak in regulator_register() configfs: fix possible memory leak in configfs_create_dir() hsr: Avoid double remove of a node. 
clk: qcom: clk-krait: fix wrong div2 functions regulator: core: fix module refcount leak in set_supply() wifi: cfg80211: Fix not unregister reg_pdev when load_builtin_regdb_keys() fails spi: spidev: mask SPI_CS_HIGH in SPI_IOC_RD_MODE bonding: uninitialized variable in bond_miimon_inspect() bpf, sockmap: Fix data loss caused by using apply_bytes on ingress redirect bpf, sockmap: Fix repeated calls to sock_put() when msg has more_data netfilter: conntrack: set icmpv6 redirects as RELATED ASoC: pcm512x: Fix PM disable depth imbalance in pcm512x_probe drm/amdgpu: Fix PCI device refcount leak in amdgpu_atrm_get_bios() drm/radeon: Fix PCI device refcount leak in radeon_atrm_get_bios() ASoC: mediatek: mt8173: Enable IRQ when pdata is ready wifi: iwlwifi: mvm: fix double free on tx path. ALSA: asihpi: fix missing pci_disable_device() NFSv4: Fix a deadlock between nfs4_open_recover_helper() and delegreturn NFSv4.2: Fix initialisation of struct nfs4_label NFSv4.2: Fix a memory stomp in decode_attr_security_label NFSv4.2: Clear FATTR4_WORD2_SECURITY_LABEL when done decoding ASoC: mediatek: mtk-btcvsd: Add checks for write and read of mtk_btcvsd_snd ASoC: dt-bindings: wcd9335: fix reset line polarity in example drm/tegra: Add missing clk_disable_unprepare() in tegra_dc_probe() media: s5p-mfc: Add variant data for MFC v7 hardware for Exynos 3250 SoC media: dvb-usb: az6027: fix null-ptr-deref in az6027_i2c_xfer() media: dvb-core: Fix ignored return value in dvb_register_frontend() pinctrl: pinconf-generic: add missing of_node_put() clk: imx: replace osc_hdmi with dummy clk: imx8mn: correct the usb1_ctrl parent to be usb_bus media: imon: fix a race condition in send_packet() mtd: maps: pxa2xx-flash: fix memory leak in probe bonding: fix link recovery in mode 2 when updelay is nonzero bonding: Rename slave_arr to usable_slaves bonding: Export skip slave logic to function clk: rockchip: Fix memory leak in rockchip_clk_register_pll() regulator: core: use kfree_const() to free space conditionally ALSA: seq: fix undefined behavior in bit shift for SNDRV_SEQ_FILTER_USE_EVENT ALSA: pcm: fix undefined behavior in bit shift for SNDRV_PCM_RATE_KNOT HID: hid-sensor-custom: set fixed size for custom attributes bpf: Move skb->len == 0 checks into __bpf_redirect media: videobuf-dma-contig: use dma_mmap_coherent media: platform: exynos4-is: Fix error handling in fimc_md_init() media: solo6x10: fix possible memory leak in solo_sysfs_init() Input: elants_i2c - properly handle the reset GPIO when power is off mtd: lpddr2_nvm: Fix possible null-ptr-deref wifi: ath10k: Fix return value in ath10k_pci_init() ima: Fix misuse of dereference of pointer in template_desc_init_fields() integrity: Fix memory leakage in keyring allocation error path amdgpu/pm: prevent array underflow in vega20_odn_edit_dpm_table() regulator: core: fix unbalanced of node refcount in regulator_dev_lookup() ASoC: pxa: fix null-pointer dereference in filter() drm/mediatek: Modify dpi power on/off sequence. 
drm/radeon: Add the missed acpi_put_table() to fix memory leak rxrpc: Fix ack.bufferSize to be 0 when generating an ack net, proc: Provide PROC_FS=n fallback for proc_create_net_single_write() media: camss: Clean up received buffers on failed start of streaming wifi: rsi: Fix handling of 802.3 EAPOL frames sent via control port mtd: Fix device name leak when register device failed in add_mtd_device() bpf: propagate precision in ALU/ALU64 operations media: vivid: fix compose size exceed boundary ima: Handle -ESTALE returned by ima_filter_rule_match() ima: Fix fall-through warnings for Clang ima: Rename internal filter rule functions drm/panel/panel-sitronix-st7701: Remove panel on DSI attach failure spi: Update reference to struct spi_controller clk: renesas: r9a06g032: Repair grave increment error can: kvaser_usb: Compare requested bittiming parameters with actual parameters in do_set_{,data}_bittiming can: kvaser_usb: Add struct kvaser_usb_busparams can: kvaser_usb_leaf: Fix bogus restart events can: kvaser_usb_leaf: Fix wrong CAN state after stopping can: kvaser_usb_leaf: Fix improved state not being reported can: kvaser_usb_leaf: Set Warning state even without bus errors can: kvaser_usb: kvaser_usb_leaf: Handle CMD_ERROR_EVENT can: kvaser_usb: kvaser_usb_leaf: Rename {leaf,usbcan}_cmd_error_event to {leaf,usbcan}_cmd_can_error_event can: kvaser_usb: kvaser_usb_leaf: Get capabilities from device can: kvaser_usb: do not increase tx statistics when sending error message frames media: i2c: ad5820: Fix error path pata_ipx4xx_cf: Fix unsigned comparison with less than zero wifi: rtl8xxxu: Fix reading the vendor of combo chips wifi: ath9k: hif_usb: Fix use-after-free in ath9k_hif_usb_reg_in_cb() wifi: ath9k: hif_usb: fix memory leak of urbs in ath9k_hif_usb_dealloc_tx_urbs() rapidio: devices: fix missing put_device in mport_cdev_open hfs: Fix OOB Write in hfs_asc2mac relay: fix type mismatch when allocating memory in relay_create_buf() eventfd: change int to __u64 in eventfd_signal() ifndef CONFIG_EVENTFD rapidio: fix possible UAF when kfifo_alloc() fails fs: sysv: Fix sysv_nblocks() returns wrong value MIPS: OCTEON: warn only once if deprecated link status is being used MIPS: BCM63xx: Add check for NULL for clk in clk_enable platform/x86: mxm-wmi: fix memleak in mxm_wmi_call_mx[ds|mx]() PM: runtime: Do not call __rpm_callback() from rpm_idle() PM: runtime: Improve path in rpm_idle() when no callback xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource() x86/xen: Fix memory leak in xen_init_lock_cpu() x86/xen: Fix memory leak in xen_smp_intr_init{_pv}() xen/events: only register debug interrupt for 2-level events uprobes/x86: Allow to probe a NOP instruction with 0x66 prefix ACPICA: Fix use-after-free in acpi_ut_copy_ipackage_to_ipackage() clocksource/drivers/sh_cmt: Make sure channel clock supply is enabled rapidio: rio: fix possible name leak in rio_register_mport() rapidio: fix possible name leaks when rio_add_device() fails ocfs2: fix memory leak in ocfs2_mount_volume() ocfs2: rewrite error handling of ocfs2_fill_super ocfs2: ocfs2_mount_volume does cleanup job before return error debugfs: fix error when writing negative value to atomic_t debugfs file docs: fault-injection: fix non-working usage of negative values lib/notifier-error-inject: fix error when writing -errno to debugfs file libfs: add DEFINE_SIMPLE_ATTRIBUTE_SIGNED for signed value cpufreq: amd_freq_sensitivity: Add missing pci_dev_put() genirq/irqdesc: Don't try to remove non-existing sysfs files nfsd: don't call 
nfsd_file_put from client states seqfile display EDAC/i10nm: fix refcount leak in pci_get_dev_wrapper() irqchip: gic-pm: Use pm_runtime_resume_and_get() in gic_probe() perf/x86/intel/uncore: Fix reference count leak in hswep_has_limit_sbox() PNP: fix name memory leak in pnp_alloc_dev() selftests/efivarfs: Add checking of the test return value MIPS: vpe-cmp: fix possible memory leak while module exiting MIPS: vpe-mt: fix possible memory leak while module exiting ocfs2: fix memory leak in ocfs2_stack_glue_init() lib/fonts: fix undefined behavior in bit shift for get_default_font proc: fixup uptime selftest timerqueue: Use rb_entry_safe() in timerqueue_getnext() perf: Fix possible memleak in pmu_dev_alloc() selftests/ftrace: event_triggers: wait longer for test_event_enable fs: don't audit the capability check in simple_xattr_list() PM: hibernate: Fix mistake in kerneldoc comment alpha: fix syscall entry in !AUDUT_SYSCALL case cpuidle: dt: Return the correct numbers of parsed idle states tpm/tpm_crb: Fix error message in __crb_relinquish_locality() pstore: Avoid kcore oops by vmap()ing with VM_IOREMAP ARM: mmp: fix timer_read delay pstore/ram: Fix error return code in ramoops_probe() arm64: dts: armada-3720-turris-mox: Add missing interrupt for RTC ARM: dts: turris-omnia: Add switch port 6 node ARM: dts: turris-omnia: Add ethernet aliases ARM: dts: armada-39x: Fix assigned-addresses for every PCIe Root Port ARM: dts: armada-38x: Fix assigned-addresses for every PCIe Root Port ARM: dts: armada-375: Fix assigned-addresses for every PCIe Root Port ARM: dts: armada-xp: Fix assigned-addresses for every PCIe Root Port ARM: dts: armada-370: Fix assigned-addresses for every PCIe Root Port ARM: dts: dove: Fix assigned-addresses for every PCIe Root Port arm64: dts: mediatek: mt6797: Fix 26M oscillator unit name arm64: dts: mt2712-evb: Fix usb vbus regulators unit names arm64: dts: mt2712-evb: Fix vproc fixed regulators unit names arm64: dts: mt2712e: Fix unit address for pinctrl node arm64: dts: mt2712e: Fix unit_address_vs_reg warning for oscillators perf/smmuv3: Fix hotplug callback leak in arm_smmu_pmu_init() perf: arm_dsu: Fix hotplug callback leak in dsu_pmu_init() soc: ti: smartreflex: Fix PM disable depth imbalance in omap_sr_probe soc: ti: knav_qmss_queue: Fix PM disable depth imbalance in knav_queue_probe soc: ti: knav_qmss_queue: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync arm: dts: spear600: Fix clcd interrupt drivers: soc: ti: knav_qmss_queue: Mark knav_acc_firmwares as static arm64: dts: qcom: sdm845-cheza: fix AP suspend pin bias ARM: dts: qcom: apq8064: fix coresight compatible usb: musb: remove extra check in musb_gadget_vbus_draw net: loopback: use NET_NAME_PREDICTABLE for name_assign_type Bluetooth: L2CAP: Fix u8 overflow HID: uclogic: Add HID_QUIRK_HIDINPUT_FORCE quirk HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch V 10 HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch 10E HID: ite: Add support for Acer S1002 keyboard-dock xen-netback: move removal of "hotplug-status" to the right place igb: Initialize mailbox message for VF reset USB: serial: f81534: fix division by zero on line-speed change USB: serial: f81232: fix division by zero on line-speed change USB: serial: cp210x: add Kamstrup RF sniffer PIDs USB: serial: option: add Quectel EM05-G modem usb: gadget: uvc: Prevent buffer overflow in setup handler udf: Fix extending file within last block udf: Do not bother looking for prealloc extents if i_lenExtents matches i_size 
udf: Fix preallocation discarding at indirect extent boundary udf: Discard preallocation before extending file with a hole tracing/ring-buffer: Only do full wait when cpu != RING_BUFFER_ALL_CPUS ANDROID: Add more hvc devices for virtio-console. Revert "can: af_can: fix NULL pointer dereference in can_rcv_filter" ANDROID: Revert "tracing/ring-buffer: Have polling block on watermark" Linux 5.4.228 ASoC: ops: Correct bounds check for second channel on SX controls can: mcba_usb: Fix termination command argument can: sja1000: fix size of OCR_MODE_MASK define pinctrl: meditatek: Startup with the IRQs disabled ASoC: ops: Check bounds for second channel in snd_soc_put_volsw_sx() nfp: fix use-after-free in area_cache_get() block: unhash blkdev part inode when the part is deleted mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page x86/smpboot: Move rcu_cpu_starting() earlier net: bpf: Allow TC programs to call BPF_FUNC_skb_change_head Linux 5.4.227 can: esd_usb: Allow REC and TEC to return to zero net: mvneta: Fix an out of bounds check ipv6: avoid use-after-free in ip6_fragment() net: plip: don't call kfree_skb/dev_kfree_skb() under spin_lock_irq() xen/netback: fix build warning ethernet: aeroflex: fix potential skb leak in greth_init_rings() ipv4: Fix incorrect route flushing when table ID 0 is used ipv4: Fix incorrect route flushing when source address is deleted tipc: Fix potential OOB in tipc_link_proto_rcv() net: hisilicon: Fix potential use-after-free in hix5hd2_rx() net: hisilicon: Fix potential use-after-free in hisi_femac_rx() net: thunderx: Fix missing destroy_workqueue of nicvf_rx_mode_wq net: stmmac: fix "snps,axi-config" node property parsing nvme initialize core quirks before calling nvme_init_subsystem NFC: nci: Bounds check struct nfc_target arrays i40e: Disallow ip4 and ip6 l4_4_bytes i40e: Fix for VF MAC address 0 i40e: Fix not setting default xps_cpus after reset net: mvneta: Prevent out of bounds read in mvneta_config_rss() xen-netfront: Fix NULL sring after live migration net: encx24j600: Fix invalid logic in reading of MISTAT register net: encx24j600: Add parentheses to fix precedence mac802154: fix missing INIT_LIST_HEAD in ieee802154_if_add() selftests: rtnetlink: correct xfrm policy rule in kci_test_ipsec_offload net: dsa: ksz: Check return value Bluetooth: Fix not cleanup led when bt_init fails Bluetooth: 6LoWPAN: add missing hci_dev_put() in get_l2cap_conn() af_unix: Get user_ns from in_skb in unix_diag_get_exact(). 
igb: Allocate MSI-X vector when testing e1000e: Fix TX dispatch condition gpio: amd8111: Fix PCI device reference count leak drm/bridge: ti-sn65dsi86: Fix output polarity setting bug ca8210: Fix crash by zero initializing data ieee802154: cc2520: Fix error return code in cc2520_hw_init() can: af_can: fix NULL pointer dereference in can_rcv_filter HID: core: fix shift-out-of-bounds in hid_report_raw_event HID: hid-lg4ff: Add check for empty lbuf HID: usbhid: Add ALWAYS_POLL quirk for some mice drm/shmem-helper: Remove errant put in error path KVM: s390: vsie: Fix the initialization of the epoch extension (epdx) field mm/gup: fix gup_pud_range() for dax memcg: fix possible use-after-free in memcg_write_event_control() media: v4l2-dv-timings.c: fix too strict blanking sanity checks Revert "net: dsa: b53: Fix valid setting for MDB entries" xen/netback: don't call kfree_skb() with interrupts disabled xen/netback: do some code cleanup xen/netback: Ensure protocol headers don't fall in the non-linear area mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths mm/khugepaged: fix GUP-fast interaction by sending IPI mm/khugepaged: take the right locks for page table retraction net: usb: qmi_wwan: add u-blox 0x1342 composition 9p/xen: check logical size for buffer size fbcon: Use kzalloc() in fbcon_prepare_logo() regulator: twl6030: fix get status of twl6032 regulators ASoC: soc-pcm: Add NULL check in BE reparenting btrfs: send: avoid unaligned encoded writes when attempting to clone range ALSA: seq: Fix function prototype mismatch in snd_seq_expand_var_event regulator: slg51000: Wait after asserting CS pin 9p/fd: Use P9_HDRSZ for header size ARM: dts: rockchip: disable arm_global_timer on rk3066 and rk3188 ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation ARM: 9251/1: perf: Fix stacktraces for tracepoint events in THUMB2 kernels ARM: dts: rockchip: rk3188: fix lcdc1-rgb24 node name ARM: dts: rockchip: fix ir-receiver node names arm: dts: rockchip: fix node name for hym8563 rtc arm64: dts: rockchip: keep I2S1 disabled for GPIO function on ROCK Pi 4 series Conflicts: Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml arch/arm64/boot/dts/vendor/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml arch/arm64/boot/dts/vendor/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml arch/arm64/include/asm/atomic_ll_sc.h drivers/edac/qcom_edac.c drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c drivers/net/ethernet/stmicro/stmmac/stmmac_main.c drivers/usb/gadget/function/f_fs.c drivers/usb/host/xhci-plat.c sound/soc/soc-pcm.c Change-Id: I4e8cffcac6c78ecf1a16d24ee01551747552fdf2
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

/**
 * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages: array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using put_user_page(),
 * possibly via put_user_pages(), for the non-dirty case.
 *
 * Please see the put_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), put_user_page().
 *
 */
void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
			       bool make_dirty)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */

	if (!make_dirty) {
		put_user_pages(pages, npages);
		return;
	}

	for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!PageDirty(page))
			set_page_dirty_lock(page);
		put_user_page(page);
	}
}
EXPORT_SYMBOL(put_user_pages_dirty_lock);

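/*
 * Illustrative sketch (editor's addition, not part of upstream gup.c):
 * a typical caller pins writable user pages, lets a device fill them, and
 * then dirties and releases them in one call via put_user_pages_dirty_lock().
 * The helper name and the "device writes into the pages" step are
 * hypothetical; only the two gup calls are real APIs used as documented above.
 */
static __maybe_unused int example_pin_for_device_write(unsigned long uaddr,
							int nr_pages,
							struct page **pages)
{
	int pinned;

	/* Request writable pages so the device may write into them. */
	pinned = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	/*
	 * ... hand @pages to the (hypothetical) device and wait for its
	 * DMA into them to complete ...
	 */

	/* Mark the pages dirty and drop the gup references in one call. */
	put_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}
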
/**
 * put_user_pages() - release an array of gup-pinned pages.
 * @pages: array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using put_user_page().
 *
 * Please see the put_user_page() documentation for details.
 */
void put_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */
	for (index = 0; index < npages; index++)
		put_user_page(pages[index]);
}
EXPORT_SYMBOL(put_user_pages);

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

/*
 * A (separate) COW fault might break the page the other way and
 * get_user_pages() would return the page from what is now the wrong
 * VM. So we need to force a COW break at GUP time even for reads.
 */
static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
{
	return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

	/*
	 * Considering PTE level hugetlb, like continuous-PTE hugetlb on
	 * ARM64 architecture.
	 */
	if (is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd_pte(vma, address, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		if (unlikely(!try_get_page(page))) {
			page = ERR_PTR(-ENOMEM);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

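/*
 * Illustrative sketch (editor's addition, not part of upstream gup.c):
 * the pte/pmd walk above is normally driven through follow_page(), defined
 * further down in this file and declared in <linux/mm.h>. A minimal caller
 * looking up a single page, under the 5.4-era mmap_sem locking rules, might
 * look like this; the helper name is hypothetical.
 */
static __maybe_unused struct page *example_lookup_one_page(struct mm_struct *mm,
							    unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page = NULL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start) {
		/* Take a reference so the page survives dropping mmap_sem. */
		page = follow_page(vma, addr, FOLL_GET);
		if (IS_ERR(page))
			page = NULL;
	}
	up_read(&mm->mmap_sem);

	/* Caller must put_page() the result when done, if non-NULL. */
	return page;
}
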
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
|
|
unsigned long address, pud_t *pudp,
|
|
unsigned int flags,
|
|
struct follow_page_context *ctx)
|
|
{
|
|
pmd_t *pmd, pmdval;
|
|
spinlock_t *ptl;
|
|
struct page *page;
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
|
|
pmd = pmd_offset(pudp, address);
|
|
/*
|
|
* The READ_ONCE() will stabilize the pmdval in a register or
|
|
* on the stack so that it will stop changing under the code.
|
|
*/
|
|
pmdval = READ_ONCE(*pmd);
|
|
if (pmd_none(pmdval))
|
|
return no_page_table(vma, flags);
|
|
if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
|
|
page = follow_huge_pmd_pte(vma, address, flags);
|
|
if (page)
|
|
return page;
|
|
return no_page_table(vma, flags);
|
|
}
|
|
if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
|
|
page = follow_huge_pd(vma, address,
|
|
__hugepd(pmd_val(pmdval)), flags,
|
|
PMD_SHIFT);
|
|
if (page)
|
|
return page;
|
|
return no_page_table(vma, flags);
|
|
}
|
|
retry:
|
|
if (!pmd_present(pmdval)) {
|
|
if (likely(!(flags & FOLL_MIGRATION)))
|
|
return no_page_table(vma, flags);
|
|
VM_BUG_ON(thp_migration_supported() &&
|
|
!is_pmd_migration_entry(pmdval));
|
|
if (is_pmd_migration_entry(pmdval))
|
|
pmd_migration_entry_wait(mm, pmd);
|
|
pmdval = READ_ONCE(*pmd);
|
|
/*
|
|
* MADV_DONTNEED may convert the pmd to null because
|
|
* mmap_sem is held in read mode
|
|
*/
|
|
if (pmd_none(pmdval))
|
|
return no_page_table(vma, flags);
|
|
goto retry;
|
|
}
|
|
if (pmd_devmap(pmdval)) {
|
|
ptl = pmd_lock(mm, pmd);
|
|
page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
|
|
spin_unlock(ptl);
|
|
if (page)
|
|
return page;
|
|
}
|
|
if (likely(!pmd_trans_huge(pmdval)))
|
|
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
|
|
|
|
if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
|
|
return no_page_table(vma, flags);
|
|
|
|
retry_locked:
|
|
ptl = pmd_lock(mm, pmd);
|
|
if (unlikely(pmd_none(*pmd))) {
|
|
spin_unlock(ptl);
|
|
return no_page_table(vma, flags);
|
|
}
|
|
if (unlikely(!pmd_present(*pmd))) {
|
|
spin_unlock(ptl);
|
|
if (likely(!(flags & FOLL_MIGRATION)))
|
|
return no_page_table(vma, flags);
|
|
pmd_migration_entry_wait(mm, pmd);
|
|
goto retry_locked;
|
|
}
|
|
if (unlikely(!pmd_trans_huge(*pmd))) {
|
|
spin_unlock(ptl);
|
|
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
|
|
}
|
|
if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
|
|
int ret;
|
|
page = pmd_page(*pmd);
|
|
if (is_huge_zero_page(page)) {
|
|
spin_unlock(ptl);
|
|
ret = 0;
|
|
split_huge_pmd(vma, pmd, address);
|
|
if (pmd_trans_unstable(pmd))
|
|
ret = -EBUSY;
|
|
} else if (flags & FOLL_SPLIT) {
|
|
if (unlikely(!try_get_page(page))) {
|
|
spin_unlock(ptl);
|
|
return ERR_PTR(-ENOMEM);
|
|
}
|
|
spin_unlock(ptl);
|
|
lock_page(page);
|
|
ret = split_huge_page(page);
|
|
unlock_page(page);
|
|
put_page(page);
|
|
if (pmd_none(*pmd))
|
|
return no_page_table(vma, flags);
|
|
} else { /* flags & FOLL_SPLIT_PMD */
|
|
spin_unlock(ptl);
|
|
split_huge_pmd(vma, pmd, address);
|
|
ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
|
|
}
|
|
|
|
return ret ? ERR_PTR(ret) :
|
|
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
|
|
}
|
|
page = follow_trans_huge_pmd(vma, address, pmd, flags);
|
|
spin_unlock(ptl);
|
|
ctx->page_mask = HPAGE_PMD_NR - 1;
|
|
return page;
|
|
}
|
|
|
|
static struct page *follow_pud_mask(struct vm_area_struct *vma,
|
|
unsigned long address, p4d_t *p4dp,
|
|
unsigned int flags,
|
|
struct follow_page_context *ctx)
|
|
{
|
|
pud_t *pud;
|
|
spinlock_t *ptl;
|
|
struct page *page;
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
|
|
pud = pud_offset(p4dp, address);
|
|
if (pud_none(*pud))
|
|
return no_page_table(vma, flags);
|
|
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
|
|
page = follow_huge_pud(mm, address, pud, flags);
|
|
if (page)
|
|
return page;
|
|
return no_page_table(vma, flags);
|
|
}
|
|
if (is_hugepd(__hugepd(pud_val(*pud)))) {
|
|
page = follow_huge_pd(vma, address,
|
|
__hugepd(pud_val(*pud)), flags,
|
|
PUD_SHIFT);
|
|
if (page)
|
|
return page;
|
|
return no_page_table(vma, flags);
|
|
}
|
|
if (pud_devmap(*pud)) {
|
|
ptl = pud_lock(mm, pud);
|
|
page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
|
|
spin_unlock(ptl);
|
|
if (page)
|
|
return page;
|
|
}
|
|
if (unlikely(pud_bad(*pud)))
|
|
return no_page_table(vma, flags);
|
|
|
|
return follow_pmd_mask(vma, address, pud, flags, ctx);
|
|
}
|
|
|
|
static struct page *follow_p4d_mask(struct vm_area_struct *vma,
|
|
unsigned long address, pgd_t *pgdp,
|
|
unsigned int flags,
|
|
struct follow_page_context *ctx)
|
|
{
|
|
p4d_t *p4d;
|
|
struct page *page;
|
|
|
|
p4d = p4d_offset(pgdp, address);
|
|
if (p4d_none(*p4d))
|
|
return no_page_table(vma, flags);
|
|
BUILD_BUG_ON(p4d_huge(*p4d));
|
|
if (unlikely(p4d_bad(*p4d)))
|
|
return no_page_table(vma, flags);
|
|
|
|
if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
|
|
page = follow_huge_pd(vma, address,
|
|
__hugepd(p4d_val(*p4d)), flags,
|
|
P4D_SHIFT);
|
|
if (page)
|
|
return page;
|
|
return no_page_table(vma, flags);
|
|
}
|
|
return follow_pud_mask(vma, address, p4d, flags, ctx);
|
|
}
|
|
|
|
/**
|
|
* follow_page_mask - look up a page descriptor from a user-virtual address
|
|
* @vma: vm_area_struct mapping @address
|
|
* @address: virtual address to look up
|
|
* @flags: flags modifying lookup behaviour
|
|
* @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
|
|
* pointer to output page_mask
|
|
*
|
|
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
|
|
*
|
|
* When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
|
|
* the device's dev_pagemap metadata to avoid repeating expensive lookups.
|
|
*
|
|
* On output, the @ctx->page_mask is set according to the size of the page.
|
|
*
|
|
* Return: the mapped (struct page *), %NULL if no mapping exists, or
|
|
* an error pointer if there is a mapping to something not represented
|
|
* by a page descriptor (see also vm_normal_page()).
|
|
*/
|
|
static struct page *follow_page_mask(struct vm_area_struct *vma,
|
|
unsigned long address, unsigned int flags,
|
|
struct follow_page_context *ctx)
|
|
{
|
|
pgd_t *pgd;
|
|
struct page *page;
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
|
|
ctx->page_mask = 0;
|
|
|
|
/* make this handle hugepd */
|
|
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
|
|
if (!IS_ERR(page)) {
|
|
BUG_ON(flags & FOLL_GET);
|
|
return page;
|
|
}
|
|
|
|
pgd = pgd_offset(mm, address);
|
|
|
|
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
|
|
return no_page_table(vma, flags);
|
|
|
|
if (pgd_huge(*pgd)) {
|
|
page = follow_huge_pgd(mm, address, pgd, flags);
|
|
if (page)
|
|
return page;
|
|
return no_page_table(vma, flags);
|
|
}
|
|
if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
|
|
page = follow_huge_pd(vma, address,
|
|
__hugepd(pgd_val(*pgd)), flags,
|
|
PGDIR_SHIFT);
|
|
if (page)
|
|
return page;
|
|
return no_page_table(vma, flags);
|
|
}
|
|
|
|
return follow_p4d_mask(vma, address, pgd, flags, ctx);
|
|
}
|
|
|
|
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
|
|
unsigned int foll_flags)
|
|
{
|
|
struct follow_page_context ctx = { NULL };
|
|
struct page *page;
|
|
|
|
page = follow_page_mask(vma, address, foll_flags, &ctx);
|
|
if (ctx.pgmap)
|
|
put_dev_pagemap(ctx.pgmap);
|
|
return page;
|
|
}
|
|
|
|
static int get_gate_page(struct mm_struct *mm, unsigned long address,
|
|
unsigned int gup_flags, struct vm_area_struct **vma,
|
|
struct page **page)
|
|
{
|
|
pgd_t *pgd;
|
|
p4d_t *p4d;
|
|
pud_t *pud;
|
|
pmd_t *pmd;
|
|
pte_t *pte;
|
|
int ret = -EFAULT;
|
|
|
|
/* user gate pages are read-only */
|
|
if (gup_flags & FOLL_WRITE)
|
|
return -EFAULT;
|
|
if (address > TASK_SIZE)
|
|
pgd = pgd_offset_k(address);
|
|
else
|
|
pgd = pgd_offset_gate(mm, address);
|
|
if (pgd_none(*pgd))
|
|
return -EFAULT;
|
|
p4d = p4d_offset(pgd, address);
|
|
if (p4d_none(*p4d))
|
|
return -EFAULT;
|
|
pud = pud_offset(p4d, address);
|
|
if (pud_none(*pud))
|
|
return -EFAULT;
|
|
pmd = pmd_offset(pud, address);
|
|
if (!pmd_present(*pmd))
|
|
return -EFAULT;
|
|
VM_BUG_ON(pmd_trans_huge(*pmd));
|
|
pte = pte_offset_map(pmd, address);
|
|
if (pte_none(*pte))
|
|
goto unmap;
|
|
*vma = get_gate_vma(mm);
|
|
if (!page)
|
|
goto out;
|
|
*page = vm_normal_page(*vma, address, *pte);
|
|
if (!*page) {
|
|
if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
|
|
goto unmap;
|
|
*page = pte_page(*pte);
|
|
}
|
|
if (unlikely(!try_get_page(*page))) {
|
|
ret = -ENOMEM;
|
|
goto unmap;
|
|
}
|
|
out:
|
|
ret = 0;
|
|
unmap:
|
|
pte_unmap(pte);
|
|
return ret;
|
|
}
|
|
|
|
/*
|
|
* mmap_sem must be held on entry. If @nonblocking != NULL and
|
|
* *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
|
|
* If it is, *@nonblocking will be set to 0 and -EBUSY returned.
|
|
*/
|
|
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
|
|
unsigned long address, unsigned int *flags, int *nonblocking)
|
|
{
|
|
unsigned int fault_flags = 0;
|
|
vm_fault_t ret;
|
|
|
|
/* mlock all present pages, but do not fault in new pages */
|
|
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
|
|
return -ENOENT;
|
|
if (*flags & FOLL_WRITE)
|
|
fault_flags |= FAULT_FLAG_WRITE;
|
|
if (*flags & FOLL_REMOTE)
|
|
fault_flags |= FAULT_FLAG_REMOTE;
|
|
if (nonblocking)
|
|
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
|
|
if (*flags & FOLL_NOWAIT)
|
|
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
|
|
if (*flags & FOLL_TRIED) {
|
|
VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
|
|
fault_flags |= FAULT_FLAG_TRIED;
|
|
}
|
|
|
|
ret = handle_mm_fault(vma, address, fault_flags);
|
|
if (ret & VM_FAULT_ERROR) {
|
|
int err = vm_fault_to_errno(ret, *flags);
|
|
|
|
if (err)
|
|
return err;
|
|
BUG();
|
|
}
|
|
|
|
if (tsk) {
|
|
if (ret & VM_FAULT_MAJOR)
|
|
tsk->maj_flt++;
|
|
else
|
|
tsk->min_flt++;
|
|
}
|
|
|
|
if (ret & VM_FAULT_RETRY) {
|
|
if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
|
|
*nonblocking = 0;
|
|
return -EBUSY;
|
|
}
|
|
|
|
/*
|
|
* The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
|
|
* necessary, even if maybe_mkwrite decided not to set pte_write. We
|
|
* can thus safely do subsequent page lookups as if they were reads.
|
|
* But only do so when looping for pte_write is futile: in some cases
|
|
* userspace may also be wanting to write to the gotten user page,
|
|
* which a read fault here might prevent (a readonly page might get
|
|
* reCOWed by userspace write).
|
|
*/
|
|
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
|
|
*flags |= FOLL_COW;
|
|
return 0;
|
|
}
|
|
|
|
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
|
|
{
|
|
vm_flags_t vm_flags = vma->vm_flags;
|
|
int write = (gup_flags & FOLL_WRITE);
|
|
int foreign = (gup_flags & FOLL_REMOTE);
|
|
|
|
if (vm_flags & (VM_IO | VM_PFNMAP))
|
|
return -EFAULT;
|
|
|
|
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
|
|
return -EFAULT;
|
|
|
|
if (write) {
|
|
if (!(vm_flags & VM_WRITE)) {
|
|
if (!(gup_flags & FOLL_FORCE))
|
|
return -EFAULT;
|
|
/*
|
|
* We used to let the write,force case do COW in a
|
|
* VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
|
|
* set a breakpoint in a read-only mapping of an
|
|
* executable, without corrupting the file (yet only
|
|
* when that file had been opened for writing!).
|
|
* Anon pages in shared mappings are surprising: now
|
|
* just reject it.
|
|
*/
|
|
if (!is_cow_mapping(vm_flags))
|
|
return -EFAULT;
|
|
}
|
|
} else if (!(vm_flags & VM_READ)) {
|
|
if (!(gup_flags & FOLL_FORCE))
|
|
return -EFAULT;
|
|
/*
|
|
* Is there actually any vma we can reach here which does not
|
|
* have VM_MAYREAD set?
|
|
*/
|
|
if (!(vm_flags & VM_MAYREAD))
|
|
return -EFAULT;
|
|
}
|
|
/*
|
|
* gups are always data accesses, not instruction
|
|
* fetches, so execute=false here
|
|
*/
|
|
if (!arch_vma_access_permitted(vma, write, false, foreign))
|
|
return -EFAULT;
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* __get_user_pages() - pin user pages in memory
|
|
* @tsk: task_struct of target task
|
|
* @mm: mm_struct of target mm
|
|
* @start: starting user address
|
|
* @nr_pages: number of pages from start to pin
|
|
* @gup_flags: flags modifying pin behaviour
|
|
* @pages: array that receives pointers to the pages pinned.
|
|
* Should be at least nr_pages long. Or NULL, if caller
|
|
* only intends to ensure the pages are faulted in.
|
|
* @vmas: array of pointers to vmas corresponding to each page.
|
|
* Or NULL if the caller does not require them.
|
|
* @nonblocking: whether waiting for disk IO or mmap_sem contention
|
|
*
|
|
* Returns number of pages pinned. This may be fewer than the number
|
|
* requested. If nr_pages is 0 or negative, returns 0. If no pages
|
|
* were pinned, returns -errno. Each page returned must be released
|
|
* with a put_page() call when it is finished with. vmas will only
|
|
* remain valid while mmap_sem is held.
|
|
*
|
|
* Must be called with mmap_sem held. It may be released. See below.
|
|
*
|
|
* __get_user_pages walks a process's page tables and takes a reference to
|
|
* each struct page that each user address corresponds to at a given
|
|
* instant. That is, it takes the page that would be accessed if a user
|
|
* thread accesses the given user virtual address at that instant.
|
|
*
|
|
* This does not guarantee that the page exists in the user mappings when
|
|
* __get_user_pages returns, and there may even be a completely different
|
|
* page there in some cases (eg. if mmapped pagecache has been invalidated
|
|
* and subsequently re faulted). However it does guarantee that the page
|
|
* won't be freed completely. And mostly callers simply care that the page
|
|
* contains data that was valid *at some point in time*. Typically, an IO
|
|
* or similar operation cannot guarantee anything stronger anyway because
|
|
* locks can't be held over the syscall boundary.
|
|
*
|
|
* If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
|
|
* the page is written to, set_page_dirty (or set_page_dirty_lock, as
|
|
* appropriate) must be called after the page is finished with, and
|
|
* before put_page is called.
|
|
*
|
|
* If @nonblocking != NULL, __get_user_pages will not wait for disk IO
|
|
* or mmap_sem contention, and if waiting is needed to pin all pages,
|
|
* *@nonblocking will be set to 0. Further, if @gup_flags does not
|
|
* include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
|
|
* this case.
|
|
*
|
|
* A caller using such a combination of @nonblocking and @gup_flags
|
|
* must therefore hold the mmap_sem for reading only, and recognize
|
|
* when it's been released. Otherwise, it must be held for either
|
|
* reading or writing and will not be released.
|
|
*
|
|
* In most cases, get_user_pages or get_user_pages_fast should be used
|
|
* instead of __get_user_pages. __get_user_pages should be used only if
|
|
* you need some special @gup_flags.
|
|
*/
|
|
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|
unsigned long start, unsigned long nr_pages,
|
|
unsigned int gup_flags, struct page **pages,
|
|
struct vm_area_struct **vmas, int *nonblocking)
|
|
{
|
|
long ret = 0, i = 0;
|
|
struct vm_area_struct *vma = NULL;
|
|
struct follow_page_context ctx = { NULL };
|
|
|
|
if (!nr_pages)
|
|
return 0;
|
|
|
|
start = untagged_addr(start);
|
|
|
|
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
|
|
|
|
/*
|
|
* If FOLL_FORCE is set then do not force a full fault as the hinting
|
|
* fault information is unrelated to the reference behaviour of a task
|
|
* using the address space
|
|
*/
|
|
if (!(gup_flags & FOLL_FORCE))
|
|
gup_flags |= FOLL_NUMA;
|
|
|
|
do {
|
|
struct page *page;
|
|
unsigned int foll_flags = gup_flags;
|
|
unsigned int page_increm;
|
|
|
|
/* first iteration or cross vma bound */
|
|
if (!vma || start >= vma->vm_end) {
|
|
vma = find_extend_vma(mm, start);
|
|
if (!vma && in_gate_area(mm, start)) {
|
|
ret = get_gate_page(mm, start & PAGE_MASK,
|
|
gup_flags, &vma,
|
|
pages ? &pages[i] : NULL);
|
|
if (ret)
|
|
goto out;
|
|
ctx.page_mask = 0;
|
|
goto next_page;
|
|
}
|
|
|
|
if (!vma || check_vma_flags(vma, gup_flags)) {
|
|
ret = -EFAULT;
|
|
goto out;
|
|
}
|
|
if (is_vm_hugetlb_page(vma)) {
|
|
i = follow_hugetlb_page(mm, vma, pages, vmas,
|
|
&start, &nr_pages, i,
|
|
gup_flags, nonblocking);
|
|
continue;
|
|
}
|
|
}
|
|
retry:
|
|
/*
|
|
* If we have a pending SIGKILL, don't keep faulting pages and
|
|
* potentially allocating memory.
|
|
*/
|
|
if (fatal_signal_pending(current)) {
|
|
ret = -ERESTARTSYS;
|
|
goto out;
|
|
}
|
|
cond_resched();
|
|
|
|
page = follow_page_mask(vma, start, foll_flags, &ctx);
|
|
if (!page) {
|
|
ret = faultin_page(tsk, vma, start, &foll_flags,
|
|
nonblocking);
|
|
switch (ret) {
|
|
case 0:
|
|
goto retry;
|
|
case -EBUSY:
|
|
ret = 0;
|
|
/* FALLTHRU */
|
|
case -EFAULT:
|
|
case -ENOMEM:
|
|
case -EHWPOISON:
|
|
goto out;
|
|
case -ENOENT:
|
|
goto next_page;
|
|
}
|
|
BUG();
|
|
} else if (PTR_ERR(page) == -EEXIST) {
|
|
/*
|
|
* Proper page table entry exists, but no corresponding
|
|
* struct page.
|
|
*/
|
|
goto next_page;
|
|
} else if (IS_ERR(page)) {
|
|
ret = PTR_ERR(page);
|
|
goto out;
|
|
}
|
|
if (pages) {
|
|
pages[i] = page;
|
|
flush_anon_page(vma, page, start);
|
|
flush_dcache_page(page);
|
|
ctx.page_mask = 0;
|
|
}
|
|
next_page:
|
|
if (vmas) {
|
|
vmas[i] = vma;
|
|
ctx.page_mask = 0;
|
|
}
|
|
page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
|
|
if (page_increm > nr_pages)
|
|
page_increm = nr_pages;
|
|
i += page_increm;
|
|
start += page_increm * PAGE_SIZE;
|
|
nr_pages -= page_increm;
|
|
} while (nr_pages);
|
|
out:
|
|
if (ctx.pgmap)
|
|
put_dev_pagemap(ctx.pgmap);
|
|
return i ? i : ret;
|
|
}
|
|
|
|
static bool vma_permits_fault(struct vm_area_struct *vma,
|
|
unsigned int fault_flags)
|
|
{
|
|
bool write = !!(fault_flags & FAULT_FLAG_WRITE);
|
|
bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
|
|
vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
|
|
|
|
if (!(vm_flags & vma->vm_flags))
|
|
return false;
|
|
|
|
/*
|
|
* The architecture might have a hardware protection
|
|
* mechanism other than read/write that can deny access.
|
|
*
|
|
* gup always represents data access, not instruction
|
|
* fetches, so execute=false here:
|
|
*/
|
|
if (!arch_vma_access_permitted(vma, write, false, foreign))
|
|
return false;
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
|
|
* fixup_user_fault() - manually resolve a user page fault
|
|
* @tsk: the task_struct to use for page fault accounting, or
|
|
* NULL if faults are not to be recorded.
|
|
* @mm: mm_struct of target mm
|
|
* @address: user address
|
|
* @fault_flags:flags to pass down to handle_mm_fault()
|
|
* @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller
|
|
* does not allow retry
|
|
*
|
|
* This is meant to be called in the specific scenario where for locking reasons
|
|
* we try to access user memory in atomic context (within a pagefault_disable()
|
|
* section), this returns -EFAULT, and we want to resolve the user fault before
|
|
* trying again.
|
|
*
|
|
* Typically this is meant to be used by the futex code.
|
|
*
|
|
* The main difference with get_user_pages() is that this function will
|
|
* unconditionally call handle_mm_fault() which will in turn perform all the
|
|
* necessary SW fixup of the dirty and young bits in the PTE, while
|
|
* get_user_pages() only guarantees to update these in the struct page.
|
|
*
|
|
* This is important for some architectures where those bits also gate the
|
|
* access permission to the page because they are maintained in software. On
|
|
* such architectures, gup() will not be enough to make a subsequent access
|
|
* succeed.
|
|
*
|
|
* This function will not return with an unlocked mmap_sem. So it has not the
|
|
* same semantics wrt the @mm->mmap_sem as does filemap_fault().
|
|
*/
|
|
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
|
|
unsigned long address, unsigned int fault_flags,
|
|
bool *unlocked)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
vm_fault_t ret, major = 0;
|
|
|
|
address = untagged_addr(address);
|
|
|
|
if (unlocked)
|
|
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
|
|
|
|
retry:
|
|
vma = find_extend_vma(mm, address);
|
|
if (!vma || address < vma->vm_start)
|
|
return -EFAULT;
|
|
|
|
if (!vma_permits_fault(vma, fault_flags))
|
|
return -EFAULT;
|
|
|
|
ret = handle_mm_fault(vma, address, fault_flags);
|
|
major |= ret & VM_FAULT_MAJOR;
|
|
if (ret & VM_FAULT_ERROR) {
|
|
int err = vm_fault_to_errno(ret, 0);
|
|
|
|
if (err)
|
|
return err;
|
|
BUG();
|
|
}
|
|
|
|
if (ret & VM_FAULT_RETRY) {
|
|
down_read(&mm->mmap_sem);
|
|
if (!(fault_flags & FAULT_FLAG_TRIED)) {
|
|
*unlocked = true;
|
|
fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
|
|
fault_flags |= FAULT_FLAG_TRIED;
|
|
goto retry;
|
|
}
|
|
}
|
|
|
|
if (tsk) {
|
|
if (major)
|
|
tsk->maj_flt++;
|
|
else
|
|
tsk->min_flt++;
|
|
}
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(fixup_user_fault);
|
|
|
|
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
|
|
struct mm_struct *mm,
|
|
unsigned long start,
|
|
unsigned long nr_pages,
|
|
struct page **pages,
|
|
struct vm_area_struct **vmas,
|
|
int *locked,
|
|
unsigned int flags)
|
|
{
|
|
long ret, pages_done;
|
|
bool lock_dropped;
|
|
|
|
if (locked) {
|
|
/* if VM_FAULT_RETRY can be returned, vmas become invalid */
|
|
BUG_ON(vmas);
|
|
/* check caller initialized locked */
|
|
BUG_ON(*locked != 1);
|
|
}
|
|
|
|
if (pages)
|
|
flags |= FOLL_GET;
|
|
|
|
pages_done = 0;
|
|
lock_dropped = false;
|
|
for (;;) {
|
|
ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
|
|
vmas, locked);
|
|
if (!locked)
|
|
/* VM_FAULT_RETRY couldn't trigger, bypass */
|
|
return ret;
|
|
|
|
/* VM_FAULT_RETRY cannot return errors */
|
|
if (!*locked) {
|
|
BUG_ON(ret < 0);
|
|
BUG_ON(ret >= nr_pages);
|
|
}
|
|
|
|
if (ret > 0) {
|
|
nr_pages -= ret;
|
|
pages_done += ret;
|
|
if (!nr_pages)
|
|
break;
|
|
}
|
|
if (*locked) {
|
|
/*
|
|
* VM_FAULT_RETRY didn't trigger or it was a
|
|
* FOLL_NOWAIT.
|
|
*/
|
|
if (!pages_done)
|
|
pages_done = ret;
|
|
break;
|
|
}
|
|
/*
|
|
* VM_FAULT_RETRY triggered, so seek to the faulting offset.
|
|
* For the prefault case (!pages) we only update counts.
|
|
*/
|
|
if (likely(pages))
|
|
pages += ret;
|
|
start += ret << PAGE_SHIFT;
|
|
|
|
/*
|
|
* Repeat on the address that fired VM_FAULT_RETRY
|
|
* without FAULT_FLAG_ALLOW_RETRY but with
|
|
* FAULT_FLAG_TRIED.
|
|
*/
|
|
*locked = 1;
|
|
lock_dropped = true;
|
|
down_read(&mm->mmap_sem);
|
|
ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
|
|
pages, NULL, NULL);
|
|
if (ret != 1) {
|
|
BUG_ON(ret > 1);
|
|
if (!pages_done)
|
|
pages_done = ret;
|
|
break;
|
|
}
|
|
nr_pages--;
|
|
pages_done++;
|
|
if (!nr_pages)
|
|
break;
|
|
if (likely(pages))
|
|
pages++;
|
|
start += PAGE_SIZE;
|
|
}
|
|
if (lock_dropped && *locked) {
|
|
/*
|
|
* We must let the caller know we temporarily dropped the lock
|
|
* and so the critical section protected by it was lost.
|
|
*/
|
|
up_read(&mm->mmap_sem);
|
|
*locked = 0;
|
|
}
|
|
return pages_done;
|
|
}
|
|
|
|
/*
|
|
* get_user_pages_remote() - pin user pages in memory
|
|
* @tsk: the task_struct to use for page fault accounting, or
|
|
* NULL if faults are not to be recorded.
|
|
* @mm: mm_struct of target mm
|
|
* @start: starting user address
|
|
* @nr_pages: number of pages from start to pin
|
|
* @gup_flags: flags modifying lookup behaviour
|
|
* @pages: array that receives pointers to the pages pinned.
|
|
* Should be at least nr_pages long. Or NULL, if caller
|
|
* only intends to ensure the pages are faulted in.
|
|
* @vmas: array of pointers to vmas corresponding to each page.
|
|
* Or NULL if the caller does not require them.
|
|
* @locked: pointer to lock flag indicating whether lock is held and
|
|
* subsequently whether VM_FAULT_RETRY functionality can be
|
|
* utilised. Lock must initially be held.
|
|
*
|
|
* Returns number of pages pinned. This may be fewer than the number
|
|
* requested. If nr_pages is 0 or negative, returns 0. If no pages
|
|
* were pinned, returns -errno. Each page returned must be released
|
|
* with a put_page() call when it is finished with. vmas will only
|
|
* remain valid while mmap_sem is held.
|
|
*
|
|
* Must be called with mmap_sem held for read or write.
|
|
*
|
|
* get_user_pages walks a process's page tables and takes a reference to
|
|
* each struct page that each user address corresponds to at a given
|
|
* instant. That is, it takes the page that would be accessed if a user
|
|
* thread accesses the given user virtual address at that instant.
|
|
*
|
|
* This does not guarantee that the page exists in the user mappings when
|
|
* get_user_pages returns, and there may even be a completely different
|
|
* page there in some cases (eg. if mmapped pagecache has been invalidated
|
|
* and subsequently re faulted). However it does guarantee that the page
|
|
* won't be freed completely. And mostly callers simply care that the page
|
|
* contains data that was valid *at some point in time*. Typically, an IO
|
|
* or similar operation cannot guarantee anything stronger anyway because
|
|
* locks can't be held over the syscall boundary.
|
|
*
|
|
* If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
|
|
* is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
|
|
* be called after the page is finished with, and before put_page is called.
|
|
*
|
|
* get_user_pages is typically used for fewer-copy IO operations, to get a
|
|
* handle on the memory by some means other than accesses via the user virtual
|
|
* addresses. The pages may be submitted for DMA to devices or accessed via
|
|
* their kernel linear mapping (via the kmap APIs). Care should be taken to
|
|
* use the correct cache flushing APIs.
|
|
*
|
|
* See also get_user_pages_fast, for performance critical applications.
|
|
*
|
|
* get_user_pages should be phased out in favor of
|
|
* get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
|
|
* should use get_user_pages because it cannot pass
|
|
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
|
|
*/
|
|
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
|
|
unsigned long start, unsigned long nr_pages,
|
|
unsigned int gup_flags, struct page **pages,
|
|
struct vm_area_struct **vmas, int *locked)
|
|
{
|
|
/*
|
|
* FIXME: Current FOLL_LONGTERM behavior is incompatible with
|
|
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
|
|
* vmas. As there are no users of this flag in this call we simply
|
|
* disallow this option for now.
|
|
*/
|
|
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
|
|
return -EINVAL;
|
|
|
|
return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
|
|
locked,
|
|
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
|
|
}
|
|
EXPORT_SYMBOL(get_user_pages_remote);
|
|
|
|
/**
|
|
* populate_vma_page_range() - populate a range of pages in the vma.
|
|
* @vma: target vma
|
|
* @start: start address
|
|
* @end: end address
|
|
* @nonblocking:
|
|
*
|
|
* This takes care of mlocking the pages too if VM_LOCKED is set.
|
|
*
|
|
* return 0 on success, negative error code on error.
|
|
*
|
|
* vma->vm_mm->mmap_sem must be held.
|
|
*
|
|
* If @nonblocking is NULL, it may be held for read or write and will
|
|
* be unperturbed.
|
|
*
|
|
* If @nonblocking is non-NULL, it must held for read only and may be
|
|
* released. If it's released, *@nonblocking will be set to 0.
|
|
*/
|
|
long populate_vma_page_range(struct vm_area_struct *vma,
|
|
unsigned long start, unsigned long end, int *nonblocking)
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
unsigned long nr_pages = (end - start) / PAGE_SIZE;
|
|
int gup_flags;
|
|
|
|
VM_BUG_ON(start & ~PAGE_MASK);
|
|
VM_BUG_ON(end & ~PAGE_MASK);
|
|
VM_BUG_ON_VMA(start < vma->vm_start, vma);
|
|
VM_BUG_ON_VMA(end > vma->vm_end, vma);
|
|
VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
|
|
|
|
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
|
|
if (vma->vm_flags & VM_LOCKONFAULT)
|
|
gup_flags &= ~FOLL_POPULATE;
|
|
/*
|
|
* We want to touch writable mappings with a write fault in order
|
|
* to break COW, except for shared mappings because these don't COW
|
|
* and we would not want to dirty them for nothing.
|
|
*/
|
|
if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
|
|
gup_flags |= FOLL_WRITE;
|
|
|
|
/*
|
|
* We want mlock to succeed for regions that have any permissions
|
|
* other than PROT_NONE.
|
|
*/
|
|
if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
|
|
gup_flags |= FOLL_FORCE;
|
|
|
|
/*
|
|
* We made sure addr is within a VMA, so the following will
|
|
* not result in a stack expansion that recurses back here.
|
|
*/
|
|
return __get_user_pages(current, mm, start, nr_pages, gup_flags,
|
|
NULL, NULL, nonblocking);
|
|
}
|
|
|
|
/*
|
|
* __mm_populate - populate and/or mlock pages within a range of address space.
|
|
*
|
|
* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
|
|
* flags. VMAs must be already marked with the desired vm_flags, and
|
|
* mmap_sem must not be held.
|
|
*/
|
|
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
|
|
{
|
|
struct mm_struct *mm = current->mm;
|
|
unsigned long end, nstart, nend;
|
|
struct vm_area_struct *vma = NULL;
|
|
int locked = 0;
|
|
long ret = 0;
|
|
|
|
end = start + len;
|
|
|
|
for (nstart = start; nstart < end; nstart = nend) {
|
|
/*
|
|
* We want to fault in pages for [nstart; end) address range.
|
|
* Find first corresponding VMA.
|
|
*/
|
|
if (!locked) {
|
|
locked = 1;
|
|
down_read(&mm->mmap_sem);
|
|
vma = find_vma(mm, nstart);
|
|
} else if (nstart >= vma->vm_end)
|
|
vma = vma->vm_next;
|
|
if (!vma || vma->vm_start >= end)
|
|
break;
|
|
/*
|
|
* Set [nstart; nend) to intersection of desired address
|
|
* range with the first VMA. Also, skip undesirable VMA types.
|
|
*/
|
|
nend = min(end, vma->vm_end);
|
|
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
|
|
continue;
|
|
if (nstart < vma->vm_start)
|
|
nstart = vma->vm_start;
|
|
/*
|
|
* Now fault in a range of pages. populate_vma_page_range()
|
|
* double checks the vma flags, so that it won't mlock pages
|
|
* if the vma was already munlocked.
|
|
*/
|
|
ret = populate_vma_page_range(vma, nstart, nend, &locked);
|
|
if (ret < 0) {
|
|
if (ignore_errors) {
|
|
ret = 0;
|
|
continue; /* continue at next VMA */
|
|
}
|
|
break;
|
|
}
|
|
nend = nstart + ret * PAGE_SIZE;
|
|
ret = 0;
|
|
}
|
|
if (locked)
|
|
up_read(&mm->mmap_sem);
|
|
return ret; /* 0 or negative error code */
|
|
}
|
|
|
|
/**
|
|
* get_dump_page() - pin user page in memory while writing it to core dump
|
|
* @addr: user address
|
|
*
|
|
* Returns struct page pointer of user page pinned for dump,
|
|
* to be freed afterwards by put_page().
|
|
*
|
|
* Returns NULL on any kind of failure - a hole must then be inserted into
|
|
* the corefile, to preserve alignment with its headers; and also returns
|
|
* NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
|
|
* allowing a hole to be left in the corefile to save diskspace.
|
|
*
|
|
* Called without mmap_sem, but after all other threads have been killed.
|
|
*/
|
|
#ifdef CONFIG_ELF_CORE
|
|
struct page *get_dump_page(unsigned long addr)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
struct page *page;
|
|
|
|
if (__get_user_pages(current, current->mm, addr, 1,
|
|
FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
|
|
NULL) < 1)
|
|
return NULL;
|
|
flush_cache_page(vma, addr, page_to_pfn(page));
|
|
return page;
|
|
}
|
|
#endif /* CONFIG_ELF_CORE */
|
|
#else /* CONFIG_MMU */
|
|
static long __get_user_pages_locked(struct task_struct *tsk,
|
|
struct mm_struct *mm, unsigned long start,
|
|
unsigned long nr_pages, struct page **pages,
|
|
struct vm_area_struct **vmas, int *locked,
|
|
unsigned int foll_flags)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
unsigned long vm_flags;
|
|
int i;
|
|
|
|
/* calculate required read or write permissions.
|
|
* If FOLL_FORCE is set, we only require the "MAY" flags.
|
|
*/
|
|
vm_flags = (foll_flags & FOLL_WRITE) ?
|
|
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
|
|
vm_flags &= (foll_flags & FOLL_FORCE) ?
|
|
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
|
|
|
|
for (i = 0; i < nr_pages; i++) {
|
|
vma = find_vma(mm, start);
|
|
if (!vma)
|
|
goto finish_or_fault;
|
|
|
|
/* protect what we can, including chardevs */
|
|
if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
|
|
!(vm_flags & vma->vm_flags))
|
|
goto finish_or_fault;
|
|
|
|
if (pages) {
|
|
pages[i] = virt_to_page(start);
|
|
if (pages[i])
|
|
get_page(pages[i]);
|
|
}
|
|
if (vmas)
|
|
vmas[i] = vma;
|
|
start = (start + PAGE_SIZE) & PAGE_MASK;
|
|
}
|
|
|
|
return i;
|
|
|
|
finish_or_fault:
|
|
return i ? : -EFAULT;
|
|
}
|
|
#endif /* !CONFIG_MMU */
|
|
|
|
#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
|
|
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
|
|
{
|
|
long i;
|
|
struct vm_area_struct *vma_prev = NULL;
|
|
|
|
for (i = 0; i < nr_pages; i++) {
|
|
struct vm_area_struct *vma = vmas[i];
|
|
|
|
if (vma == vma_prev)
|
|
continue;
|
|
|
|
vma_prev = vma;
|
|
|
|
if (vma_is_fsdax(vma))
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
|
|
#ifdef CONFIG_CMA
|
|
static struct page *new_non_cma_page(struct page *page, unsigned long private)
|
|
{
|
|
/*
|
|
* We want to make sure we allocate the new page from the same node
|
|
* as the source page.
|
|
*/
|
|
int nid = page_to_nid(page);
|
|
/*
|
|
* Trying to allocate a page for migration. Ignore allocation
|
|
* failure warnings. We don't force __GFP_THISNODE here because
|
|
* this node here is the node where we have CMA reservation and
|
|
* in some case these nodes will have really less non movable
|
|
* allocation memory.
|
|
*/
|
|
gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
|
|
|
|
if (PageHighMem(page))
|
|
gfp_mask |= __GFP_HIGHMEM;
|
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
if (PageHuge(page)) {
|
|
struct hstate *h = page_hstate(page);
|
|
/*
|
|
* We don't want to dequeue from the pool because pool pages will
|
|
* mostly be from the CMA region.
|
|
*/
|
|
return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
|
|
}
|
|
#endif
|
|
if (PageTransHuge(page)) {
|
|
struct page *thp;
|
|
/*
|
|
* ignore allocation failure warnings
|
|
*/
|
|
gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
|
|
|
|
/*
|
|
* Remove the movable mask so that we don't allocate from
|
|
* CMA area again.
|
|
*/
|
|
thp_gfpmask &= ~__GFP_MOVABLE;
|
|
thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
|
|
if (!thp)
|
|
return NULL;
|
|
prep_transhuge_page(thp);
|
|
return thp;
|
|
}
|
|
|
|
return __alloc_pages_node(nid, gfp_mask, 0);
|
|
}
|
|
|
|
static long check_and_migrate_cma_pages(struct task_struct *tsk,
|
|
struct mm_struct *mm,
|
|
unsigned long start,
|
|
unsigned long nr_pages,
|
|
struct page **pages,
|
|
struct vm_area_struct **vmas,
|
|
unsigned int gup_flags)
|
|
{
|
|
unsigned long i;
|
|
unsigned long step;
|
|
bool drain_allow = true;
|
|
bool migrate_allow = true;
|
|
LIST_HEAD(cma_page_list);
|
|
|
|
check_again:
|
|
for (i = 0; i < nr_pages;) {
|
|
|
|
struct page *head = compound_head(pages[i]);
|
|
|
|
/*
|
|
* gup may start from a tail page. Advance step by the left
|
|
* part.
|
|
*/
|
|
step = compound_nr(head) - (pages[i] - head);
|
|
/*
|
|
* If we get a page from the CMA zone, since we are going to
|
|
* be pinning these entries, we might as well move them out
|
|
* of the CMA zone if possible.
|
|
*/
|
|
if (is_migrate_cma_page(head)) {
|
|
if (PageHuge(head))
|
|
isolate_huge_page(head, &cma_page_list);
|
|
else {
|
|
if (!PageLRU(head) && drain_allow) {
|
|
lru_add_drain_all();
|
|
drain_allow = false;
|
|
}
|
|
|
|
if (!isolate_lru_page(head)) {
|
|
list_add_tail(&head->lru, &cma_page_list);
|
|
mod_node_page_state(page_pgdat(head),
|
|
NR_ISOLATED_ANON +
|
|
page_is_file_cache(head),
|
|
hpage_nr_pages(head));
|
|
}
|
|
}
|
|
}
|
|
|
|
i += step;
|
|
}
|
|
|
|
if (!list_empty(&cma_page_list)) {
|
|
/*
|
|
* drop the above get_user_pages reference.
|
|
*/
|
|
for (i = 0; i < nr_pages; i++)
|
|
put_page(pages[i]);
|
|
|
|
if (migrate_pages(&cma_page_list, new_non_cma_page,
|
|
NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
|
|
/*
|
|
* some of the pages failed migration. Do get_user_pages
|
|
* without migration.
|
|
*/
|
|
migrate_allow = false;
|
|
|
|
if (!list_empty(&cma_page_list))
|
|
putback_movable_pages(&cma_page_list);
|
|
}
|
|
/*
|
|
* We did migrate all the pages, Try to get the page references
|
|
* again migrating any new CMA pages which we failed to isolate
|
|
* earlier.
|
|
*/
|
|
nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
|
|
pages, vmas, NULL,
|
|
gup_flags);
|
|
|
|
if ((nr_pages > 0) && migrate_allow) {
|
|
drain_allow = true;
|
|
goto check_again;
|
|
}
|
|
}
|
|
|
|
return nr_pages;
|
|
}
|
|
#else
|
|
static long check_and_migrate_cma_pages(struct task_struct *tsk,
|
|
struct mm_struct *mm,
|
|
unsigned long start,
|
|
unsigned long nr_pages,
|
|
struct page **pages,
|
|
struct vm_area_struct **vmas,
|
|
unsigned int gup_flags)
|
|
{
|
|
return nr_pages;
|
|
}
|
|
#endif /* CONFIG_CMA */
|
|
|
|
/*
|
|
* __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
|
|
* allows us to process the FOLL_LONGTERM flag.
|
|
*/
|
|
static long __gup_longterm_locked(struct task_struct *tsk,
|
|
struct mm_struct *mm,
|
|
unsigned long start,
|
|
unsigned long nr_pages,
|
|
struct page **pages,
|
|
struct vm_area_struct **vmas,
|
|
unsigned int gup_flags)
|
|
{
|
|
struct vm_area_struct **vmas_tmp = vmas;
|
|
unsigned long flags = 0;
|
|
long rc, i;
|
|
|
|
if (gup_flags & FOLL_LONGTERM) {
|
|
if (!pages)
|
|
return -EINVAL;
|
|
|
|
if (!vmas_tmp) {
|
|
vmas_tmp = kcalloc(nr_pages,
|
|
sizeof(struct vm_area_struct *),
|
|
GFP_KERNEL);
|
|
if (!vmas_tmp)
|
|
return -ENOMEM;
|
|
}
|
|
flags = memalloc_nocma_save();
|
|
}
|
|
|
|
rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
|
|
vmas_tmp, NULL, gup_flags);
|
|
|
|
if (gup_flags & FOLL_LONGTERM) {
|
|
memalloc_nocma_restore(flags);
|
|
if (rc < 0)
|
|
goto out;
|
|
|
|
if (check_dax_vmas(vmas_tmp, rc)) {
|
|
for (i = 0; i < rc; i++)
|
|
put_page(pages[i]);
|
|
rc = -EOPNOTSUPP;
|
|
goto out;
|
|
}
|
|
|
|
rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
|
|
vmas_tmp, gup_flags);
|
|
}
|
|
|
|
out:
|
|
if (vmas_tmp != vmas)
|
|
kfree(vmas_tmp);
|
|
return rc;
|
|
}
|
|
#else /* !CONFIG_FS_DAX && !CONFIG_CMA */
|
|
static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
|
|
struct mm_struct *mm,
|
|
unsigned long start,
|
|
unsigned long nr_pages,
|
|
struct page **pages,
|
|
struct vm_area_struct **vmas,
|
|
unsigned int flags)
|
|
{
|
|
return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
|
|
NULL, flags);
|
|
}
|
|
#endif /* CONFIG_FS_DAX || CONFIG_CMA */
|
|
|
|
/*
|
|
* This is the same as get_user_pages_remote(), just with a
|
|
* less-flexible calling convention where we assume that the task
|
|
* and mm being operated on are the current task's and don't allow
|
|
* passing of a locked parameter. We also obviously don't pass
|
|
* FOLL_REMOTE in here.
|
|
*/
|
|
long get_user_pages(unsigned long start, unsigned long nr_pages,
|
|
unsigned int gup_flags, struct page **pages,
|
|
struct vm_area_struct **vmas)
|
|
{
|
|
return __gup_longterm_locked(current, current->mm, start, nr_pages,
|
|
pages, vmas, gup_flags | FOLL_TOUCH);
|
|
}
|
|
EXPORT_SYMBOL(get_user_pages);
|
|
|
|
/*
|
|
* We can leverage the VM_FAULT_RETRY functionality in the page fault
|
|
* paths better by using either get_user_pages_locked() or
|
|
* get_user_pages_unlocked().
|
|
*
|
|
* get_user_pages_locked() is suitable to replace the form:
|
|
*
|
|
* down_read(&mm->mmap_sem);
|
|
* do_something()
|
|
* get_user_pages(tsk, mm, ..., pages, NULL);
|
|
* up_read(&mm->mmap_sem);
|
|
*
|
|
* to:
|
|
*
|
|
* int locked = 1;
|
|
* down_read(&mm->mmap_sem);
|
|
* do_something()
|
|
* get_user_pages_locked(tsk, mm, ..., pages, &locked);
|
|
* if (locked)
|
|
* up_read(&mm->mmap_sem);
|
|
*/
|
|
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
|
|
unsigned int gup_flags, struct page **pages,
|
|
int *locked)
|
|
{
|
|
/*
|
|
* FIXME: Current FOLL_LONGTERM behavior is incompatible with
|
|
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
|
|
* vmas. As there are no users of this flag in this call we simply
|
|
* disallow this option for now.
|
|
*/
|
|
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
|
|
return -EINVAL;
|
|
|
|
return __get_user_pages_locked(current, current->mm, start, nr_pages,
|
|
pages, NULL, locked,
|
|
gup_flags | FOLL_TOUCH);
|
|
}
|
|
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	/*
	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
	 * vmas.  As there are no users of this flag in this call we simply
	 * disallow this option for now.
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
		return -EINVAL;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
				      &locked, gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
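
/*
 * Illustrative sketch, not part of this file: with the unlocked variant the
 * caller holds no mmap_sem at all; the helper takes and drops it internally.
 * example_gup_unlocked() is an invented name for the example only.
 */
#if 0
static long example_gup_unlocked(unsigned long uaddr, unsigned long nr_pages,
				 struct page **pages)
{
	/* Note the argument order: pages before gup_flags. */
	return get_user_pages_unlocked(uaddr & PAGE_MASK, nr_pages, pages,
				       FOLL_WRITE);
}
#endif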

/*
 * Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *  free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_FAST_GUP
#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking any
 * locks.  For this we would like to load the pointers atomically, but sometimes
 * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE).  What
 * we do have is the guarantee that a PTE will only either go from not present
 * to present, or present to not present or both -- it will not switch to a
 * completely different present page without a TLB flush in between; something
 * that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
 * We load pte_high *after* loading pte_low, which ensures we don't see an older
 * value of pte_high.  *Then* we recheck pte_low, which ensures that we haven't
 * picked up a changed pte high. We might have gotten rubbish values from
 * pte_low and pte_high, but we are guaranteed that pte_low will not have the
 * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
 * operates on present ptes we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
/*
 * We require that the PTE can be read atomically.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */

static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
					    struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;
	return head;
}

#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			if (unlikely(flags & FOLL_LONGTERM))
				goto pte_unmap;

			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		head = try_get_compound_head(page, 1);
		if (!head)
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);

		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
			     unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	if (pgmap)
		put_dev_pagemap(pgmap);
	return 1;
}

static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
				 unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}

static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
				 unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
				 unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}

static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
				 unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif

#ifdef CONFIG_ARCH_HAS_HUGEPD
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, unsigned int flags,
		       struct page **pages, int *nr)
{
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);

	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(head, refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		unsigned int pdshift, unsigned long end, unsigned int flags,
		struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
#else
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		unsigned int pdshift, unsigned long end, unsigned int flags,
		struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_HUGEPD */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
			unsigned long end, unsigned int flags,
			struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pmd_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
	}

	refs = 0;
	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pmd_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pud_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
	}

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pud_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, unsigned int flags,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));
	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pgd_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset_lockless(pudp, pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
			     pmd_devmap(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures may use different formats for
			 * hugetlbfs PMDs and THP PMDs.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset_lockless(p4dp, p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, flags,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

static void gup_pgd_range(unsigned long addr, unsigned long end,
			  unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, flags, pages, nr))
				return;
		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}
#else
static inline void gup_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
}
#endif /* CONFIG_HAVE_FAST_GUP */

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use __get_user_pages_fast() for the range, or
 * we need to fall back to the slow version:
 */
static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return true;
}
#endif
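
/*
 * Illustrative sketch, not part of this file: the #ifndef above exists so
 * that an architecture header included before this point can supply its own
 * gup_fast_permitted() and keep the lockless walk inside its user address
 * range; the generic version above is only the permissive default.
 * EXAMPLE_ARCH_USER_END is a made-up constant standing in for whatever limit
 * a real architecture would check against.
 */
#if 0
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= EXAMPLE_ARCH_USER_END;
}
#endif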

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 *
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 *
 * Careful, careful! COW breaking can go either way, so a non-write
 * access can get ambiguous page results. If you call this function without
 * 'write' set, you'd better be sure that you're ok with that ambiguity.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	unsigned long len, end;
	unsigned long flags;
	int nr = 0;

	start = untagged_addr(start) & PAGE_MASK;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (end <= start)
		return 0;
	if (unlikely(!access_ok((void __user *)start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 *
	 * NOTE! We allow read-only gup_fast() here, but you'd better be
	 * careful about possible COW pages. You'll get _a_ COW page, but
	 * not necessarily the one you intended to get depending on what
	 * COW event happens after this. COW may break the page copy in a
	 * random direction.
	 */

	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
	    gup_fast_permitted(start, end)) {
		local_irq_save(flags);
		gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
		local_irq_restore(flags);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
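
/*
 * Illustrative sketch, not part of this file: __get_user_pages_fast() never
 * sleeps and never falls back to the slow path, so a caller in atomic context
 * has to cope with it pinning fewer pages than asked for (possibly none).
 * example_peek_user_page() is an invented name for the example only.
 */
#if 0
static struct page *example_peek_user_page(unsigned long uaddr)
{
	struct page *page;

	/* Read-only probe; 0 means no write access is required. */
	if (__get_user_pages_fast(uaddr & PAGE_MASK, 1, 0, &page) != 1)
		return NULL;	/* fall back to a sleeping path later */

	return page;		/* caller drops the ref with put_page() */
}
#endif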

static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
				   unsigned int gup_flags, struct page **pages)
{
	int ret;

	/*
	 * FIXME: FOLL_LONGTERM does not work with
	 * get_user_pages_unlocked() (see comments in that function)
	 */
	if (gup_flags & FOLL_LONGTERM) {
		down_read(&current->mm->mmap_sem);
		ret = __gup_longterm_locked(current, current->mm,
					    start, nr_pages,
					    pages, NULL, gup_flags);
		up_read(&current->mm->mmap_sem);
	} else {
		ret = get_user_pages_unlocked(start, nr_pages,
					      pages, gup_flags);
	}

	return ret;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	unsigned long addr, len, end;
	int nr = 0, ret = 0;

	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
				       FOLL_FORCE)))
		return -EINVAL;

	start = untagged_addr(start) & PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (end <= start)
		return 0;
	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
	    gup_fast_permitted(start, end)) {
		local_irq_disable();
		gup_pgd_range(addr, end, gup_flags, pages, &nr);
		local_irq_enable();
		ret = nr;
	}

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = __gup_longterm_unlocked(start, nr_pages - nr,
					      gup_flags, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
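
/*
 * Illustrative sketch, not part of this file: a typical get_user_pages_fast()
 * caller pins a whole user buffer and releases every page it managed to pin
 * if the request could not be satisfied in full.  example_pin_user_buffer()
 * and its all-or-nothing policy are assumptions made for the example.
 */
#if 0
static long example_pin_user_buffer(unsigned long uaddr, unsigned long len,
				    struct page **pages)
{
	unsigned long offset = uaddr & ~PAGE_MASK;
	int nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	int pinned, i;

	pinned = get_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				     FOLL_WRITE, pages);
	if (pinned < nr_pages) {
		/* Release any partial pin before reporting the failure. */
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		return pinned < 0 ? pinned : -EFAULT;
	}

	return pinned;
}
#endif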