android_kernel_xiaomi_sm8350/mm/huge_memory.c
Srinivasarao Pathipati a6b9d0d53d Merge android11-5.4.242+(e699d54) into msm-5.4
* remotes/origin/tmp-e699d54:
  ANDROID: HID: Only utilise UHID provided exports if UHID is enabled
  ANDROID: HID: Over-ride default maximum buffer size when using UHID
  Revert "ANDROID: AVB error handler to invalidate vbmeta partition."
  UPSTREAM: mailbox: mailbox-test: fix a locking issue in mbox_test_message_write()
  UPSTREAM: mailbox: mailbox-test: Fix potential double-free in mbox_test_message_write()
  UPSTREAM: efi: rt-wrapper: Add missing include
  BACKPORT: arm64: efi: Execute runtime services from a dedicated stack
  UPSTREAM: io_uring: have io_kill_timeout() honor the request references
  UPSTREAM: io_uring: don't drop completion lock before timer is fully initialized
  UPSTREAM: io_uring: always grab lock in io_cancel_async_work()
  UPSTREAM: net: cdc_ncm: Deal with too low values of dwNtbOutMaxSize
  UPSTREAM: cdc_ncm: Fix the build warning
  UPSTREAM: cdc_ncm: Implement the 32-bit version of NCM Transfer Block
  UPSTREAM: ext4: avoid a potential slab-out-of-bounds in ext4_group_desc_csum
  UPSTREAM: ext4: fix invalid free tracking in ext4_xattr_move_to_block()
  Revert "Revert "mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse""
  FROMLIST: binder: fix UAF caused by faulty buffer cleanup
  UPSTREAM: usb: musb: mediatek: don't unregister something that wasn't registered
  UPSTREAM: net: fix NULL pointer in skb_segment_list
  UPSTREAM: xfrm/compat: prevent potential spectre v1 gadget in xfrm_xlate32_attr()
  UPSTREAM: xfrm: compat: change expression for switch in xfrm_xlate64
  UPSTREAM: perf/core: Call LSM hook after copying perf_event_attr
  UPSTREAM: ext4: fix use-after-free in ext4_xattr_set_entry
  UPSTREAM: ext4: remove duplicate definition of ext4_xattr_ibody_inline_set()
  UPSTREAM: Revert "ext4: fix use-after-free in ext4_xattr_set_entry"
  Linux 5.4.242
  ASN.1: Fix check for strdup() success
  iio: adc: at91-sama5d2_adc: fix an error code in at91_adc_allocate_trigger()
  pwm: meson: Explicitly set .polarity in .get_state()
  xfs: fix forkoff miscalculation related to XFS_LITINO(mp)
  sctp: Call inet6_destroy_sock() via sk->sk_destruct().
  dccp: Call inet6_destroy_sock() via sk->sk_destruct().
  inet6: Remove inet6_destroy_sock() in sk->sk_prot->destroy().
  tcp/udp: Call inet6_destroy_sock() in IPv6 sk->sk_destruct().
  udp: Call inet6_destroy_sock() in setsockopt(IPV6_ADDRFORM).
  ext4: fix use-after-free in ext4_xattr_set_entry
  ext4: remove duplicate definition of ext4_xattr_ibody_inline_set()
  Revert "ext4: fix use-after-free in ext4_xattr_set_entry"
  x86/purgatory: Don't generate debug info for purgatory.ro
  MIPS: Define RUNTIME_DISCARD_EXIT in LD script
  mmc: sdhci_am654: Set HIGH_SPEED_ENA for SDR12 and SDR25
  memstick: fix memory leak if card device is never registered
  nilfs2: initialize unused bytes in segment summary blocks
  iio: light: tsl2772: fix reading proximity-diodes from device tree
  xen/netback: use same error messages for same errors
  nvme-tcp: fix a possible UAF when failing to allocate an io queue
  s390/ptrace: fix PTRACE_GET_LAST_BREAK error handling
  net: dsa: b53: mmap: add phy ops
  scsi: core: Improve scsi_vpd_inquiry() checks
  scsi: megaraid_sas: Fix fw_crash_buffer_show()
  selftests: sigaltstack: fix -Wuninitialized
  Input: i8042 - add quirk for Fujitsu Lifebook A574/H
  f2fs: Fix f2fs_truncate_partial_nodes ftrace event
  e1000e: Disable TSO on i219-LM card to increase speed
  bpf: Fix incorrect verifier pruning due to missing register precision taints
  mlxfw: fix null-ptr-deref in mlxfw_mfa2_tlv_next()
  i40e: fix i40e_setup_misc_vector() error handling
  i40e: fix accessing vsi->active_filters without holding lock
  netfilter: nf_tables: fix ifdef to also consider nf_tables=m
  virtio_net: bugfix overflow inside xdp_linearize_page()
  net: sched: sch_qfq: prevent slab-out-of-bounds in qfq_activate_agg
  regulator: fan53555: Explicitly include bits header
  netfilter: br_netfilter: fix recent physdev match breakage
  arm64: dts: meson-g12-common: specify full DMC range
  ARM: dts: rockchip: fix a typo error for rk3288 spdif node
  Linux 5.4.241
  xfs: force log and push AIL to clear pinned inodes when aborting mount
  xfs: don't reuse busy extents on extent trim
  xfs: consider shutdown in bmapbt cursor delete assert
  xfs: shut down the filesystem if we screw up quota reservation
  xfs: report corruption only as a regular error
  xfs: set inode size after creating symlink
  xfs: fix up non-directory creation in SGID directories
  xfs: remove the di_version field from struct icdinode
  xfs: simplify a check in xfs_ioctl_setattr_check_cowextsize
  xfs: simplify di_flags2 inheritance in xfs_ialloc
  xfs: only check the superblock version for dinode size calculation
  xfs: add a new xfs_sb_version_has_v3inode helper
  xfs: remove the kuid/kgid conversion wrappers
  xfs: remove the icdinode di_uid/di_gid members
  xfs: ensure that the inode uid/gid match values match the icdinode ones
  xfs: merge the projid fields in struct xfs_icdinode
  xfs: show the proper user quota options
  coresight-etm4: Fix for() loop drvdata->nr_addr_cmp range bug
  watchdog: sbsa_wdog: Make sure the timeout programming is within the limits
  i2c: ocores: generate stop condition after timeout in polling mode
  ubi: Fix deadlock caused by recursively holding work_sem
  mtd: ubi: wl: Fix a couple of kernel-doc issues
  ubi: Fix failure attaching when vid_hdr offset equals to (sub)page size
  asymmetric_keys: log on fatal failures in PE/pkcs7
  verify_pefile: relax wrapper length check
  drm: panel-orientation-quirks: Add quirk for Lenovo Yoga Book X90F
  efi: sysfb_efi: Add quirk for Lenovo Yoga Book X91F/L
  i2c: imx-lpi2c: clean rx/tx buffers upon new message
  power: supply: cros_usbpd: reclassify "default case!" as debug
  net: macb: fix a memory corruption in extended buffer descriptor mode
  udp6: fix potential access to stale information
  RDMA/core: Fix GID entry ref leak when create_ah fails
  sctp: fix a potential overflow in sctp_ifwdtsn_skip
  qlcnic: check pci_reset_function result
  niu: Fix missing unwind goto in niu_alloc_channels()
  9p/xen : Fix use after free bug in xen_9pfs_front_remove due to race condition
  mtd: rawnand: stm32_fmc2: remove unsupported EDO mode
  mtd: rawnand: meson: fix bitmask for length in command word
  mtdblock: tolerate corrected bit-flips
  btrfs: fix fast csum implementation detection
  btrfs: print checksum type and implementation at mount time
  Bluetooth: Fix race condition in hidp_session_thread
  Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}
  ALSA: hda/sigmatel: fix S/PDIF out on Intel D*45* motherboards
  ALSA: firewire-tascam: add missing unwind goto in snd_tscm_stream_start_duplex()
  ALSA: i2c/cs8427: fix iec958 mixer control deactivation
  ALSA: hda/sigmatel: add pin overrides for Intel DP45SG motherboard
  ALSA: emu10k1: fix capture interrupt handler unlinking
  Revert "pinctrl: amd: Disable and mask interrupts on resume"
  irqdomain: Fix mapping-creation race
  irqdomain: Refactor __irq_domain_alloc_irqs()
  irqdomain: Look for existing mapping only once
  mm/swap: fix swap_info_struct race between swapoff and get_swap_pages()
  ring-buffer: Fix race while reader and writer are on the same page
  drm/panfrost: Fix the panfrost_mmu_map_fault_addr() error path
  net_sched: prevent NULL dereference if default qdisc setup failed
  tracing: Free error logs of tracing instances
  can: j1939: j1939_tp_tx_dat_new(): fix out-of-bounds memory access
  ftrace: Mark get_lock_parent_ip() __always_inline
  perf/core: Fix the same task check in perf_event_set_output
  ALSA: hda/realtek: Add quirk for Clevo X370SNW
  nilfs2: fix sysfs interface lifetime
  nilfs2: fix potential UAF of struct nilfs_sc_info in nilfs_segctor_thread()
  tty: serial: fsl_lpuart: avoid checking for transfer complete when UARTCTRL_SBK is asserted in lpuart32_tx_empty
  tty: serial: sh-sci: Fix Rx on RZ/G2L SCI
  tty: serial: sh-sci: Fix transmit end interrupt handler
  iio: dac: cio-dac: Fix max DAC write value check for 12-bit
  iio: adc: ti-ads7950: Set `can_sleep` flag for GPIO chip
  USB: serial: option: add Quectel RM500U-CN modem
  USB: serial: option: add Telit FE990 compositions
  usb: typec: altmodes/displayport: Fix configure initial pin assignment
  USB: serial: cp210x: add Silicon Labs IFS-USB-DATACABLE IDs
  xhci: also avoid the XHCI_ZERO_64B_REGS quirk with a passthrough iommu
  NFSD: callback request does not use correct credential for AUTH_SYS
  sunrpc: only free unix grouplist after RCU settles
  gpio: davinci: Add irq chip flag to skip set wake
  ipv6: Fix an uninit variable access bug in __ip6_make_skb()
  sctp: check send stream number after wait_for_sndbuf
  net: don't let netpoll invoke NAPI if in xmit context
  icmp: guard against too small mtu
  wifi: mac80211: fix invalid drv_sta_pre_rcu_remove calls for non-uploaded sta
  pwm: sprd: Explicitly set .polarity in .get_state()
  pwm: cros-ec: Explicitly set .polarity in .get_state()
  pinctrl: amd: Disable and mask interrupts on resume
  pinctrl: amd: disable and mask interrupts on probe
  pinctrl: amd: Use irqchip template
  smb3: fix problem with null cifs super block with previous patch
  treewide: Replace DECLARE_TASKLET() with DECLARE_TASKLET_OLD()
  Revert "treewide: Replace DECLARE_TASKLET() with DECLARE_TASKLET_OLD()"
  cgroup/cpuset: Wake up cpuset_attach_wq tasks in cpuset_cancel_attach()
  x86/PCI: Add quirk for AMD XHCI controller that loses MSI-X state in D3hot
  scsi: ses: Handle enclosure with just a primary component gracefully
  Linux 5.4.240
  gfs2: Always check inode size of inline inodes
  firmware: arm_scmi: Fix device node validation for mailbox transport
  net: sched: fix race condition in qdisc_graft()
  net_sched: add __rcu annotation to netdev->qdisc
  ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
  btrfs: scan device in non-exclusive mode
  s390/uaccess: add missing earlyclobber annotations to __clear_user()
  drm/etnaviv: fix reference leak when mmaping imported buffer
  ALSA: usb-audio: Fix regression on detection of Roland VS-100
  ALSA: hda/conexant: Partial revert of a quirk for Lenovo
  NFSv4: Fix hangs when recovering open state after a server reboot
  pinctrl: at91-pio4: fix domain name assignment
  xen/netback: don't do grant copy across page boundary
  Input: goodix - add Lenovo Yoga Book X90F to nine_bytes_report DMI table
  cifs: fix DFS traversal oops without CONFIG_CIFS_DFS_UPCALL
  cifs: prevent infinite recursion in CIFSGetDFSRefer()
  Input: focaltech - use explicitly signed char type
  Input: alps - fix compatibility with -funsigned-char
  pinctrl: ocelot: Fix alt mode for ocelot
  net: mvneta: make tx buffer array agnostic
  net: dsa: mv88e6xxx: Enable IGMP snooping on user ports only
  bnxt_en: Fix typo in PCI id to device description string mapping
  i40e: fix registers dump after run ethtool adapter self test
  s390/vfio-ap: fix memory leak in vfio_ap device driver
  can: bcm: bcm_tx_setup(): fix KMSAN uninit-value in vfs_write
  net/net_failover: fix txq exceeding warning
  regulator: Handle deferred clk
  regulator: fix spelling mistake "Cant" -> "Can't"
  ptp_qoriq: fix memory leak in probe()
  scsi: megaraid_sas: Fix crash after a double completion
  mtd: rawnand: meson: invalidate cache on polling ECC bit
  mips: bmips: BCM6358: disable RAC flush for TP1
  dma-mapping: drop the dev argument to arch_sync_dma_for_*
  ca8210: Fix unsigned mac_len comparison with zero in ca8210_skb_tx()
  fbdev: au1200fb: Fix potential divide by zero
  fbdev: lxfb: Fix potential divide by zero
  fbdev: intelfb: Fix potential divide by zero
  fbdev: nvidia: Fix potential divide by zero
  sched_getaffinity: don't assume 'cpumask_size()' is fully initialized
  fbdev: tgafb: Fix potential divide by zero
  ALSA: hda/ca0132: fixup buffer overrun at tuning_ctl_set()
  ALSA: asihpi: check pao in control_message()
  md: avoid signed overflow in slot_store()
  bus: imx-weim: fix branch condition evaluates to a garbage value
  fsverity: don't drop pagecache at end of FS_IOC_ENABLE_VERITY
  ocfs2: fix data corruption after failed write
  tun: avoid double free in tun_free_netdev
  sched/fair: Sanitize vruntime of entity being migrated
  sched/fair: sanitize vruntime of entity being placed
  dm crypt: add cond_resched() to dmcrypt_write()
  dm stats: check for and propagate alloc_percpu failure
  i2c: xgene-slimpro: Fix out-of-bounds bug in xgene_slimpro_i2c_xfer()
  nilfs2: fix kernel-infoleak in nilfs_ioctl_wrap_copy()
  wifi: mac80211: fix qos on mesh interfaces
  usb: chipidea: core: fix possible concurrent when switch role
  usb: chipdea: core: fix return -EINVAL if request role is the same with current role
  usb: cdns3: Fix issue with using incorrect PCI device function
  dm thin: fix deadlock when swapping to thin device
  igb: revert rtnl_lock() that causes deadlock
  fsverity: Remove WQ_UNBOUND from fsverity read workqueue
  usb: gadget: u_audio: don't let userspace block driver unbind
  scsi: core: Add BLIST_SKIP_VPD_PAGES for SKhynix H28U74301AMR
  cifs: empty interface list when server doesn't support query interfaces
  sh: sanitize the flags on sigreturn
  net: usb: qmi_wwan: add Telit 0x1080 composition
  net: usb: cdc_mbim: avoid altsetting toggling for Telit FE990
  scsi: lpfc: Avoid usage of list iterator variable after loop
  scsi: ufs: core: Add soft dependency on governor_simpleondemand
  scsi: target: iscsi: Fix an error message in iscsi_check_key()
  selftests/bpf: check that modifier resolves after pointer
  m68k: Only force 030 bus error if PC not in exception table
  ca8210: fix mac_len negative array access
  riscv: Bump COMMAND_LINE_SIZE value to 1024
  thunderbolt: Use const qualifier for `ring_interrupt_index`
  uas: Add US_FL_NO_REPORT_OPCODES for JMicron JMS583Gen 2
  scsi: qla2xxx: Perform lockless command completion in abort path
  hwmon (it87): Fix voltage scaling for chips with 10.9mV ADCs
  platform/chrome: cros_ec_chardev: fix kernel data leak from ioctl
  Bluetooth: btsdio: fix use after free bug in btsdio_remove due to unfinished work
  Bluetooth: btqcomsmd: Fix command timeout after setting BD address
  net: mdio: thunder: Add missing fwnode_handle_put()
  hvc/xen: prevent concurrent accesses to the shared ring
  nvme-tcp: fix nvme_tcp_term_pdu to match spec
  net/sonic: use dma_mapping_error() for error check
  erspan: do not use skb_mac_header() in ndo_start_xmit()
  atm: idt77252: fix kmemleak when rmmod idt77252
  net/mlx5: Read the TC mapping of all priorities on ETS query
  bpf: Adjust insufficient default bpf_jit_limit
  keys: Do not cache key in task struct if key is requested from kernel thread
  net/ps3_gelic_net: Use dma_mapping_error
  net/ps3_gelic_net: Fix RX sk_buff length
  net: qcom/emac: Fix use after free bug in emac_remove due to race condition
  xirc2ps_cs: Fix use after free bug in xirc2ps_detach
  qed/qed_sriov: guard against NULL derefs from qed_iov_get_vf_info
  net: usb: smsc95xx: Limit packet length to skb->len
  scsi: scsi_dh_alua: Fix memleak for 'qdata' in alua_activate()
  i2c: imx-lpi2c: check only for enabled interrupt flags
  igbvf: Regard vf reset nack as success
  intel/igbvf: free irq on the error path in igbvf_request_msix()
  iavf: fix non-tunneled IPv6 UDP packet type and hashing
  iavf: fix inverted Rx hash condition leading to disabled hash
  power: supply: da9150: Fix use after free bug in da9150_charger_remove due to race condition
  net: tls: fix possible race condition between do_tls_getsockopt_conf() and do_tls_setsockopt_conf()
  Linux 5.4.239
  selftests: Fix the executable permissions for fib_tests.sh
  BACKPORT: mac80211_hwsim: notify wmediumd of used MAC addresses
  FROMGIT: mac80211_hwsim: add concurrent channels scanning support over virtio
  Revert "HID: core: Provide new max_buffer_size attribute to over-ride the default"
  Revert "HID: uhid: Over-ride the default maximum data buffer value with our own"
  Linux 5.4.238
  HID: uhid: Over-ride the default maximum data buffer value with our own
  HID: core: Provide new max_buffer_size attribute to over-ride the default
  PCI: Unify delay handling for reset and resume
  s390/ipl: add missing intersection check to ipl_report handling
  serial: 8250_em: Fix UART port type
  drm/i915: Don't use stolen memory for ring buffers with LLC
  x86/mm: Fix use of uninitialized buffer in sme_enable()
  fbdev: stifb: Provide valid pixelclock and add fb_check_var() checks
  ftrace: Fix invalid address access in lookup_rec() when index is 0
  KVM: nVMX: add missing consistency checks for CR0 and CR4
  tracing: Make tracepoint lockdep check actually test something
  tracing: Check field value in hist_field_name()
  interconnect: fix mem leak when freeing nodes
  tty: serial: fsl_lpuart: skip waiting for transmission complete when UARTCTRL_SBK is asserted
  ext4: fix possible double unlock when moving a directory
  sh: intc: Avoid spurious sizeof-pointer-div warning
  drm/amdkfd: Fix an illegal memory access
  ext4: fix task hung in ext4_xattr_delete_inode
  ext4: fail ext4_iget if special inode unallocated
  jffs2: correct logic when creating a hole in jffs2_write_begin
  mmc: atmel-mci: fix race between stop command and start of next command
  media: m5mols: fix off-by-one loop termination error
  hwmon: (ina3221) return prober error code
  hwmon: (xgene) Fix use after free bug in xgene_hwmon_remove due to race condition
  hwmon: (adt7475) Fix masking of hysteresis registers
  hwmon: (adt7475) Display smoothing attributes in correct order
  ethernet: sun: add check for the mdesc_grab()
  net/iucv: Fix size of interrupt data
  net: usb: smsc75xx: Move packet length check to prevent kernel panic in skb_pull
  ipv4: Fix incorrect table ID in IOCTL path
  block: sunvdc: add check for mdesc_grab() returning NULL
  nvmet: avoid potential UAF in nvmet_req_complete()
  net: usb: smsc75xx: Limit packet length to skb->len
  nfc: st-nci: Fix use after free bug in ndlc_remove due to race condition
  net: phy: smsc: bail out in lan87xx_read_status if genphy_read_status fails
  net: tunnels: annotate lockless accesses to dev->needed_headroom
  qed/qed_dev: guard against a possible division by zero
  i40e: Fix kernel crash during reboot when adapter is in recovery mode
  ipvlan: Make skb->skb_iif track skb->dev for l3s mode
  nfc: pn533: initialize struct pn533_out_arg properly
  tcp: tcp_make_synack() can be called from process context
  scsi: core: Fix a procfs host directory removal regression
  scsi: core: Fix a comment in function scsi_host_dev_release()
  netfilter: nft_redir: correct value of inet type `.maxattrs`
  ALSA: hda: Match only Intel devices with CONTROLLER_IN_GPU()
  ALSA: hda: Add Intel DG2 PCI ID and HDMI codec vid
  ALSA: hda: Add Alderlake-S PCI ID and HDMI codec vid
  ALSA: hda - controller is in GPU on the DG1
  ALSA: hda - add Intel DG1 PCI and HDMI ids
  scsi: mpt3sas: Fix NULL pointer access in mpt3sas_transport_port_add()
  docs: Correct missing "d_" prefix for dentry_operations member d_weak_revalidate
  clk: HI655X: select REGMAP instead of depending on it
  drm/meson: fix 1px pink line on GXM when scaling video overlay
  cifs: Move the in_send statistic to __smb_send_rqst()
  drm/panfrost: Don't sync rpm suspension after mmu flushing
  xfrm: Allow transport-mode states with AF_UNSPEC selector
  ext4: fix cgroup writeback accounting with fs-layer encryption
  ANDROID: preserve CRC for __irq_domain_add()
  Revert "drm/exynos: Don't reset bridge->next"
  Revert "drm/bridge: Rename bridge helpers targeting a bridge chain"
  Revert "drm/bridge: Introduce drm_bridge_get_next_bridge()"
  Revert "drm: Initialize struct drm_crtc_state.no_vblank from device settings"
  Revert "drm/msm/mdp5: Add check for kzalloc"
  Linux 5.4.237
  s390/dasd: add missing discipline function
  UML: define RUNTIME_DISCARD_EXIT
  sh: define RUNTIME_DISCARD_EXIT
  s390: define RUNTIME_DISCARD_EXIT to fix link error with GNU ld < 2.36
  powerpc/vmlinux.lds: Don't discard .rela* for relocatable builds
  powerpc/vmlinux.lds: Define RUNTIME_DISCARD_EXIT
  arch: fix broken BuildID for arm64 and riscv
  x86, vmlinux.lds: Add RUNTIME_DISCARD_EXIT to generic DISCARDS
  drm/i915: Don't use BAR mappings for ring buffers with LLC
  ipmi:watchdog: Set panic count to proper value on a panic
  ipmi/watchdog: replace atomic_add() and atomic_sub()
  media: ov5640: Fix analogue gain control
  PCI: Add SolidRun vendor ID
  macintosh: windfarm: Use unsigned type for 1-bit bitfields
  alpha: fix R_ALPHA_LITERAL reloc for large modules
  MIPS: Fix a compilation issue
  ext4: Fix deadlock during directory rename
  riscv: Use READ_ONCE_NOCHECK in imprecise unwinding stack mode
  net/smc: fix fallback failed while sendmsg with fastopen
  scsi: megaraid_sas: Update max supported LD IDs to 240
  btf: fix resolving BTF_KIND_VAR after ARRAY, STRUCT, UNION, PTR
  netfilter: tproxy: fix deadlock due to missing BH disable
  bnxt_en: Avoid order-5 memory allocation for TPA data
  net: caif: Fix use-after-free in cfusbl_device_notify()
  net: lan78xx: fix accessing the LAN7800's internal phy specific registers from the MAC driver
  net: usb: lan78xx: Remove lots of set but unused 'ret' variables
  selftests: nft_nat: ensuring the listening side is up before starting the client
  ila: do not generate empty messages in ila_xlat_nl_cmd_get_mapping()
  nfc: fdp: add null check of devm_kmalloc_array in fdp_nci_i2c_read_device_properties
  drm/msm/a5xx: fix setting of the CP_PREEMPT_ENABLE_LOCAL register
  ext4: Fix possible corruption when moving a directory
  scsi: core: Remove the /proc/scsi/${proc_name} directory earlier
  cifs: Fix uninitialized memory read in smb3_qfs_tcon()
  SMB3: Backup intent flag missing from some more ops
  iommu/vt-d: Fix PASID directory pointer coherency
  irqdomain: Fix domain registration race
  irqdomain: Change the type of 'size' in __irq_domain_add() to be consistent
  ipmi:ssif: Add a timer between request retries
  ipmi:ssif: Increase the message retry time
  ipmi:ssif: Remove rtc_us_timer
  ipmi:ssif: resend_msg() cannot fail
  ipmi:ssif: make ssif_i2c_send() void
  iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter
  iommu/amd: Fix ill-formed ivrs_ioapic, ivrs_hpet and ivrs_acpihid options
  iommu/amd: Add PCI segment support for ivrs_[ioapic/hpet/acpihid] commands
  nfc: change order inside nfc_se_io error path
  ext4: zero i_disksize when initializing the bootloader inode
  ext4: fix WARNING in ext4_update_inline_data
  ext4: move where set the MAY_INLINE_DATA flag is set
  ext4: fix another off-by-one fsmap error on 1k block filesystems
  ext4: fix RENAME_WHITEOUT handling for inline directories
  drm/connector: print max_requested_bpc in state debugfs
  x86/CPU/AMD: Disable XSAVES on AMD family 0x17
  fs: prevent out-of-bounds array speculation when closing a file descriptor
  Linux 5.4.236
  staging: rtl8192e: Remove call_usermodehelper starting RadioPower.sh
  staging: rtl8192e: Remove function ..dm_check_ac_dc_power calling a script
  wifi: cfg80211: Partial revert "wifi: cfg80211: Fix use after free for wext"
  Linux 5.4.235
  dt-bindings: rtc: sun6i-a31-rtc: Loosen the requirements on the clocks
  media: uvcvideo: Fix race condition with usb_kill_urb
  media: uvcvideo: Provide sync and async uvc_ctrl_status_event
  tcp: Fix listen() regression in 5.4.229.
  Bluetooth: hci_sock: purge socket queues in the destruct() callback
  x86/resctl: fix scheduler confusion with 'current'
  x86/resctrl: Apply READ_ONCE/WRITE_ONCE to task_struct.{rmid,closid}
  net: tls: avoid hanging tasks on the tx_lock
  phy: rockchip-typec: Fix unsigned comparison with less than zero
  PCI: Add ACS quirk for Wangxun NICs
  kernel/fail_function: fix memory leak with using debugfs_lookup()
  usb: uvc: Enumerate valid values for color matching
  USB: ene_usb6250: Allocate enough memory for full object
  usb: host: xhci: mvebu: Iterate over array indexes instead of using pointer math
  iio: accel: mma9551_core: Prevent uninitialized variable in mma9551_read_config_word()
  iio: accel: mma9551_core: Prevent uninitialized variable in mma9551_read_status_word()
  tools/iio/iio_utils:fix memory leak
  mei: bus-fixup:upon error print return values of send and receive
  tty: serial: fsl_lpuart: disable the CTS when send break signal
  tty: fix out-of-bounds access in tty_driver_lookup_tty()
  staging: emxx_udc: Add checks for dma_alloc_coherent()
  media: uvcvideo: Silence memcpy() run-time false positive warnings
  media: uvcvideo: Quirk for autosuspend in Logitech B910 and C910
  media: uvcvideo: Handle errors from calls to usb_string
  media: uvcvideo: Handle cameras with invalid descriptors
  mfd: arizona: Use pm_runtime_resume_and_get() to prevent refcnt leak
  firmware/efi sysfb_efi: Add quirk for Lenovo IdeaPad Duet 3
  tracing: Add NULL checks for buffer in ring_buffer_free_read_page()
  thermal: intel: BXT_PMIC: select REGMAP instead of depending on it
  thermal: intel: quark_dts: fix error pointer dereference
  scsi: ipr: Work around fortify-string warning
  rtc: sun6i: Always export the internal oscillator
  rtc: sun6i: Make external 32k oscillator optional
  vc_screen: modify vcs_size() handling in vcs_read()
  tcp: tcp_check_req() can be called from process context
  ARM: dts: spear320-hmi: correct STMPE GPIO compatible
  net/sched: act_sample: fix action bind logic
  nfc: fix memory leak of se_io context in nfc_genl_se_io
  net/mlx5: Geneve, Fix handling of Geneve object id as error code
  9p/rdma: unmap receive dma buffer in rdma_request()/post_recv()
  9p/xen: fix connection sequence
  9p/xen: fix version parsing
  net: fix __dev_kfree_skb_any() vs drop monitor
  sctp: add a refcnt in sctp_stream_priorities to avoid a nested loop
  ipv6: Add lwtunnel encap size of all siblings in nexthop calculation
  netfilter: ctnetlink: fix possible refcount leak in ctnetlink_create_conntrack()
  watchdog: pcwd_usb: Fix attempting to access uninitialized memory
  watchdog: Fix kmemleak in watchdog_cdev_register
  watchdog: at91sam9_wdt: use devm_request_irq to avoid missing free_irq() in error path
  x86: um: vdso: Add '%rcx' and '%r11' to the syscall clobber list
  ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
  ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
  ubifs: ubifs_writepage: Mark page dirty after writing inode failed
  ubifs: dirty_cow_znode: Fix memleak in error handling path
  ubifs: Re-statistic cleaned znode count if commit failed
  ubi: Fix possible null-ptr-deref in ubi_free_volume()
  ubifs: Fix memory leak in alloc_wbufs()
  ubi: Fix unreferenced object reported by kmemleak in ubi_resize_volume()
  ubi: Fix use-after-free when volume resizing failed
  ubifs: Reserve one leb for each journal head while doing budget
  ubifs: do_rename: Fix wrong space budget when target inode's nlink > 1
  ubifs: Fix wrong dirty space budget for dirty inode
  ubifs: Rectify space budget for ubifs_xrename()
  ubifs: Rectify space budget for ubifs_symlink() if symlink is encrypted
  ubifs: Fix build errors as symbol undefined
  ubi: ensure that VID header offset + VID header size <= alloc, size
  um: vector: Fix memory leak in vector_config
  fs: f2fs: initialize fsdata in pagecache_write()
  f2fs: use memcpy_{to,from}_page() where possible
  pwm: stm32-lp: fix the check on arr and cmp registers update
  pwm: sifive: Always let the first pwm_apply_state succeed
  pwm: sifive: Reduce time the controller lock is held
  fs/jfs: fix shift exponent db_agl2size negative
  net/sched: Retire tcindex classifier
  kbuild: Port silent mode detection to future gnu make.
  wifi: ath9k: use proper statements in conditionals
  drm/radeon: Fix eDP for single-display iMac11,2
  drm/i915/quirks: Add inverted backlight quirk for HP 14-r206nv
  PCI: Avoid FLR for AMD FCH AHCI adapters
  PCI: hotplug: Allow marking devices as disconnected during bind/unbind
  PCI/PM: Observe reset delay irrespective of bridge_d3
  scsi: ses: Fix slab-out-of-bounds in ses_intf_remove()
  scsi: ses: Fix possible desc_ptr out-of-bounds accesses
  scsi: ses: Fix possible addl_desc_ptr out-of-bounds accesses
  scsi: ses: Fix slab-out-of-bounds in ses_enclosure_data_process()
  scsi: ses: Don't attach if enclosure has no components
  scsi: qla2xxx: Fix erroneous link down
  scsi: qla2xxx: Fix DMA-API call trace on NVMe LS requests
  scsi: qla2xxx: Fix link failure in NPIV environment
  ktest.pl: Add RUN_TIMEOUT option with default unlimited
  ktest.pl: Fix missing "end_monitor" when machine check fails
  ktest.pl: Give back console on Ctrl^C on monitor
  mm/thp: check and bail out if page in deferred queue already
  mm: memcontrol: deprecate charge moving
  media: ipu3-cio2: Fix PM runtime usage_count in driver unbind
  mips: fix syscall_get_nr
  alpha: fix FEN fault handling
  rbd: avoid use-after-free in do_rbd_add() when rbd_dev_create() fails
  ARM: dts: exynos: correct TMU phandle in Odroid XU
  ARM: dts: exynos: correct TMU phandle in Exynos4
  dm flakey: don't corrupt the zero page
  dm flakey: fix logic when corrupting a bio
  thermal: intel: powerclamp: Fix cur_state for multi package system
  wifi: cfg80211: Fix use after free for wext
  wifi: rtl8xxxu: Use a longer retry limit of 48
  ext4: refuse to create ea block when umounted
  ext4: optimize ea_inode block expansion
  ALSA: hda/realtek: Add quirk for HP EliteDesk 800 G6 Tower PC
  ALSA: ice1712: Do not left ice->gpio_mutex locked in aureon_add_controls()
  irqdomain: Drop bogus fwspec-mapping error handling
  irqdomain: Fix disassociation race
  irqdomain: Fix association race
  ima: Align ima_file_mmap() parameters with mmap_file LSM hook
  Documentation/hw-vuln: Document the interaction between IBRS and STIBP
  x86/speculation: Allow enabling STIBP with legacy IBRS
  x86/microcode/AMD: Fix mixed steppings support
  x86/microcode/AMD: Add a @cpu parameter to the reloading functions
  x86/microcode/amd: Remove load_microcode_amd()'s bsp parameter
  x86/kprobes: Fix arch_check_optimized_kprobe check within optimized_kprobe range
  x86/kprobes: Fix __recover_optprobed_insn check optimizing logic
  x86/reboot: Disable SVM, not just VMX, when stopping CPUs
  x86/reboot: Disable virtualization in an emergency if SVM is supported
  x86/crash: Disable virt in core NMI crash handler to avoid double shootdown
  x86/virt: Force GIF=1 prior to disabling SVM (for reboot flows)
  KVM: s390: disable migration mode when dirty tracking is disabled
  KVM: Destroy target device if coalesced MMIO unregistration fails
  udf: Fix file corruption when appending just after end of preallocated extent
  udf: Detect system inodes linked into directory hierarchy
  udf: Preserve link count of system files
  udf: Do not update file length for failed writes to inline files
  udf: Do not bother merging very long extents
  udf: Truncate added extents on failed expansion
  ocfs2: fix non-auto defrag path not working issue
  ocfs2: fix defrag path triggering jbd2 ASSERT
  f2fs: fix cgroup writeback accounting with fs-layer encryption
  f2fs: fix information leak in f2fs_move_inline_dirents()
  fs: hfsplus: fix UAF issue in hfsplus_put_super
  hfs: fix missing hfs_bnode_get() in __hfs_bnode_create
  ARM: dts: exynos: correct HDMI phy compatible in Exynos4
  s390/kprobes: fix current_kprobe never cleared after kprobes reenter
  s390/kprobes: fix irq mask clobbering on kprobe reenter from post_handler
  s390: discard .interp section
  ipmi_ssif: Rename idle state and check
  rtc: pm8xxx: fix set-alarm race
  firmware: coreboot: framebuffer: Ignore reserved pixel color bits
  wifi: rtl8xxxu: fixing transmisison failure for rtl8192eu
  nfsd: zero out pointers after putting nfsd_files on COPY setup error
  dm cache: add cond_resched() to various workqueue loops
  dm thin: add cond_resched() to various workqueue loops
  drm: panel-orientation-quirks: Add quirk for Lenovo IdeaPad Duet 3 10IGL5
  pinctrl: at91: use devm_kasprintf() to avoid potential leaks
  hwmon: (coretemp) Simplify platform device handling
  regulator: s5m8767: Bounds check id indexing into arrays
  regulator: max77802: Bounds check regulator id against opmode
  ASoC: kirkwood: Iterate over array indexes instead of using pointer math
  docs/scripts/gdb: add necessary make scripts_gdb step
  drm/msm/dsi: Add missing check for alloc_ordered_workqueue
  drm/radeon: free iio for atombios when driver shutdown
  HID: Add Mapping for System Microphone Mute
  drm/omap: dsi: Fix excessive stack usage
  drm/amd/display: Fix potential null-deref in dm_resume
  uaccess: Add minimum bounds check on kernel buffer size
  coda: Avoid partial allocation of sig_inputArgs
  net/mlx5: fw_tracer: Fix debug print
  ACPI: video: Fix Lenovo Ideapad Z570 DMI match
  wifi: mt76: dma: free rx_head in mt76_dma_rx_cleanup
  m68k: Check syscall_trace_enter() return code
  net: bcmgenet: Add a check for oversized packets
  ACPI: Don't build ACPICA with '-Os'
  ice: add missing checks for PF vsi type
  inet: fix fast path in __inet_hash_connect()
  wifi: mt7601u: fix an integer underflow
  wifi: brcmfmac: ensure CLM version is null-terminated to prevent stack-out-of-bounds
  x86/bugs: Reset speculation control settings on init
  timers: Prevent union confusion from unexpected restart_syscall()
  thermal: intel: Fix unsigned comparison with less than zero
  rcu: Suppress smp_processor_id() complaint in synchronize_rcu_expedited_wait()
  wifi: brcmfmac: Fix potential stack-out-of-bounds in brcmf_c_preinit_dcmds()
  blk-iocost: fix divide by 0 error in calc_lcoefs()
  ARM: dts: exynos: Use Exynos5420 compatible for the MIPI video phy
  udf: Define EFSCORRUPTED error code
  rpmsg: glink: Avoid infinite loop on intent for missing channel
  media: usb: siano: Fix use after free bugs caused by do_submit_urb
  media: i2c: ov7670: 0 instead of -EINVAL was returned
  media: rc: Fix use-after-free bugs caused by ene_tx_irqsim()
  media: i2c: ov772x: Fix memleak in ov772x_probe()
  media: ov5675: Fix memleak in ov5675_init_controls()
  powerpc: Remove linker flag from KBUILD_AFLAGS
  media: platform: ti: Add missing check for devm_regulator_get
  remoteproc: qcom_q6v5_mss: Use a carveout to authenticate modem headers
  MIPS: vpe-mt: drop physical_memsize
  MIPS: SMP-CPS: fix build error when HOTPLUG_CPU not set
  powerpc/eeh: Set channel state after notifying the drivers
  powerpc/eeh: Small refactor of eeh_handle_normal_event()
  powerpc/rtas: ensure 4KB alignment for rtas_data_buf
  powerpc/rtas: make all exports GPL
  powerpc/pseries/lparcfg: add missing RTAS retry status handling
  powerpc/pseries/lpar: add missing RTAS retry status handling
  clk: Honor CLK_OPS_PARENT_ENABLE in clk_core_is_enabled()
  powerpc/powernv/ioda: Skip unallocated resources when mapping to PE
  clk: qcom: gpucc-sdm845: fix clk_dis_wait being programmed for CX GDSC
  Input: ads7846 - don't check penirq immediately for 7845
  Input: ads7846 - don't report pressure for ads7845
  clk: renesas: cpg-mssr: Remove superfluous check in resume code
  clk: renesas: cpg-mssr: Use enum clk_reg_layout instead of a boolean flag
  clk: renesas: cpg-mssr: Fix use after free if cpg_mssr_common_init() failed
  mtd: rawnand: sunxi: Fix the size of the last OOB region
  clk: qcom: gcc-qcs404: fix names of the DSI clocks used as parents
  clk: qcom: gcc-qcs404: disable gpll[04]_out_aux parents
  mfd: pcf50633-adc: Fix potential memleak in pcf50633_adc_async_read()
  selftests/ftrace: Fix bash specific "==" operator
  sparc: allow PM configs for sparc32 COMPILE_TEST
  perf tools: Fix auto-complete on aarch64
  perf llvm: Fix inadvertent file creation
  gfs2: jdata writepage fix
  cifs: Fix warning and UAF when destroy the MR list
  cifs: Fix lost destroy smbd connection when MR allocate failed
  nfsd: fix race to check ls_layouts
  hid: bigben_probe(): validate report count
  HID: asus: Fix mute and touchpad-toggle keys on Medion Akoya E1239T
  HID: asus: Add support for multi-touch touchpad on Medion Akoya E1239T
  HID: asus: Add report_size to struct asus_touchpad_info
  HID: asus: Only set EV_REP if we are adding a mapping
  HID: bigben: use spinlock to safely schedule workers
  HID: bigben_worker() remove unneeded check on report_field
  HID: bigben: use spinlock to protect concurrent accesses
  ASoC: soc-dapm.h: fixup warning struct snd_pcm_substream not declared
  ASoC: dapm: declare missing structure prototypes
  spi: synquacer: Fix timeout handling in synquacer_spi_transfer_one()
  dm: remove flush_scheduled_work() during local_exit()
  hwmon: (mlxreg-fan) Return zero speed for broken fan
  spi: bcm63xx-hsspi: Fix multi-bit mode setting
  spi: bcm63xx-hsspi: fix pm_runtime
  scsi: aic94xx: Add missing check for dma_map_single()
  hwmon: (ltc2945) Handle error case in ltc2945_value_store
  gpio: vf610: connect GPIO label to dev name
  ASoC: soc-compress.c: fixup private_data on snd_soc_new_compress()
  drm/mediatek: Clean dangling pointer on bind error path
  drm/mediatek: Drop unbalanced obj unref
  drm/mediatek: Use NULL instead of 0 for NULL pointer
  drm/mediatek: remove cast to pointers passed to kfree
  gpu: host1x: Don't skip assigning syncpoints to channels
  drm/msm/mdp5: Add check for kzalloc
  drm: Initialize struct drm_crtc_state.no_vblank from device settings
  drm/bridge: Introduce drm_bridge_get_next_bridge()
  drm/bridge: Rename bridge helpers targeting a bridge chain
  drm/exynos: Don't reset bridge->next
  drm/msm/dpu: Add check for pstates
  drm/msm/dpu: Add check for cstate
  drm/msm: use strscpy instead of strncpy
  drm/mipi-dsi: Fix byte order of 16-bit DCS set/get brightness
  ALSA: hda/ca0132: minor fix for allocation size
  ASoC: fsl_sai: initialize is_dsp_mode flag
  pinctrl: stm32: Fix refcount leak in stm32_pctrl_get_irq_domain
  drm/msm/hdmi: Add missing check for alloc_ordered_workqueue
  gpu: ipu-v3: common: Add of_node_put() for reference returned by of_graph_get_port_by_id()
  drm/vc4: dpi: Fix format mapping for RGB565
  drm/vc4: dpi: Add option for inverting pixel clock and output enable
  drm/bridge: megachips: Fix error handling in i2c_register_driver()
  drm: mxsfb: DRM_MXSFB should depend on ARCH_MXS || ARCH_MXC
  drm/fourcc: Add missing big-endian XRGB1555 and RGB565 formats
  selftest: fib_tests: Always cleanup before exit
  selftests/net: Interpret UDP_GRO cmsg data as an int value
  irqchip/irq-bcm7120-l2: Set IRQ_LEVEL for level triggered interrupts
  irqchip/irq-brcmstb-l2: Set IRQ_LEVEL for level triggered interrupts
  can: esd_usb: Move mislocated storage of SJA1000_ECC_SEG bits in case of a bus error
  thermal/drivers/hisi: Drop second sensor hi3660
  wifi: mac80211: make rate u32 in sta_set_rate_info_rx()
  crypto: crypto4xx - Call dma_unmap_page when done
  wifi: mwifiex: fix loop iterator in mwifiex_update_ampdu_txwinsize()
  wifi: iwl4965: Add missing check for create_singlethread_workqueue()
  wifi: iwl3945: Add missing check for create_singlethread_workqueue
  treewide: Replace DECLARE_TASKLET() with DECLARE_TASKLET_OLD()
  usb: gadget: udc: Avoid tasklet passing a global
  RISC-V: time: initialize hrtimer based broadcast clock event device
  m68k: /proc/hardware should depend on PROC_FS
  crypto: rsa-pkcs1pad - Use akcipher_request_complete
  rds: rds_rm_zerocopy_callback() correct order for list_add_tail()
  libbpf: Fix alen calculation in libbpf_nla_dump_errormsg()
  Bluetooth: L2CAP: Fix potential user-after-free
  OPP: fix error checking in opp_migrate_dentry()
  tap: tap_open(): correctly initialize socket uid
  tun: tun_chr_open(): correctly initialize socket uid
  net: add sock_init_data_uid()
  mptcp: add sk_stop_timer_sync helper
  irqchip/ti-sci: Fix refcount leak in ti_sci_intr_irq_domain_probe
  irqchip/irq-mvebu-gicp: Fix refcount leak in mvebu_gicp_probe
  irqchip/alpine-msi: Fix refcount leak in alpine_msix_init_domains
  net/mlx5: Enhance debug print in page allocation failure
  powercap: fix possible name leak in powercap_register_zone()
  crypto: seqiv - Handle EBUSY correctly
  crypto: essiv - Handle EBUSY correctly
  crypto: essiv - remove redundant null pointer check before kfree
  crypto: ccp - Failure on re-initialization due to duplicate sysfs filename
  ACPI: battery: Fix missing NUL-termination with large strings
  wifi: ath9k: Fix potential stack-out-of-bounds write in ath9k_wmi_rsp_callback()
  wifi: ath9k: hif_usb: clean up skbs if ath9k_hif_usb_rx_stream() fails
  ath9k: htc: clean up statistics macros
  ath9k: hif_usb: simplify if-if to if-else
  wifi: ath9k: htc_hst: free skb in ath9k_htc_rx_msg() if there is no callback function
  wifi: orinoco: check return value of hermes_write_wordrec()
  ACPICA: nsrepair: handle cases without a return value correctly
  lib/mpi: Fix buffer overrun when SG is too long
  genirq: Fix the return type of kstat_cpu_irqs_sum()
  ACPICA: Drop port I/O validation for some regions
  crypto: x86/ghash - fix unaligned access in ghash_setkey()
  wifi: wl3501_cs: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas: cmdresp: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas: main: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas: if_usb: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas_tf: don't call kfree_skb() under spin_lock_irqsave()
  wifi: brcmfmac: unmap dma buffer in brcmf_msgbuf_alloc_pktid()
  wifi: brcmfmac: fix potential memory leak in brcmf_netdev_start_xmit()
  wifi: wilc1000: fix potential memory leak in wilc_mac_xmit()
  wilc1000: let wilc_mac_xmit() return NETDEV_TX_OK
  wifi: ipw2200: fix memory leak in ipw_wdev_init()
  wifi: ipw2x00: don't call dev_kfree_skb() under spin_lock_irqsave()
  ipw2x00: switch from 'pci_' to 'dma_' API
  wifi: rtlwifi: Fix global-out-of-bounds bug in _rtl8812ae_phy_set_txpower_limit()
  rtlwifi: fix -Wpointer-sign warning
  wifi: rtl8xxxu: don't call dev_kfree_skb() under spin_lock_irqsave()
  wifi: libertas: fix memory leak in lbs_init_adapter()
  wifi: iwlegacy: common: don't call dev_kfree_skb() under spin_lock_irqsave()
  net/wireless: Delete unnecessary checks before the macro call “dev_kfree_skb”
  wifi: rsi: Fix memory leak in rsi_coex_attach()
  block: bio-integrity: Copy flags when bio_integrity_payload is cloned
  sched/rt: pick_next_rt_entity(): check list_entry
  sched/deadline,rt: Remove unused parameter from pick_next_[rt|dl]_entity()
  s390/dasd: Fix potential memleak in dasd_eckd_init()
  s390/dasd: Prepare for additional path event handling
  blk-mq: correct stale comment of .get_budget
  blk-mq: wait on correct sbitmap_queue in blk_mq_mark_tag_wait
  blk-mq: remove stale comment for blk_mq_sched_mark_restart_hctx
  block: Limit number of items taken from the I/O scheduler in one go
  Revert "scsi: core: run queue if SCSI device queue isn't ready and queue is idle"
  arm64: dts: mediatek: mt7622: Add missing pwm-cells to pwm node
  ARM: dts: imx7s: correct iomuxc gpr mux controller cells
  arm64: dts: amlogic: meson-gxl-s905d-phicomm-n1: fix led node name
  arm64: dts: amlogic: meson-gxl: add missing unit address to eth-phy-mux node name
  arm64: dts: amlogic: meson-gx: add missing unit address to rng node name
  arm64: dts: amlogic: meson-gx: add missing SCPI sensors compatible
  arm64: dts: amlogic: meson-axg: fix SCPI clock dvfs node name
  arm64: dts: amlogic: meson-gx: fix SCPI clock dvfs node name
  ARM: imx: Call ida_simple_remove() for ida_simple_get
  ARM: dts: exynos: correct wr-active property in Exynos3250 Rinato
  ARM: OMAP1: call platform_device_put() in error case in omap1_dm_timer_init()
  arm64: dts: meson: remove CPU opps below 1GHz for G12A boards
  arm64: dts: meson-gx: Fix the SCPI DVFS node name and unit address
  arm64: dts: meson-g12a: Fix internal Ethernet PHY unit name
  arm64: dts: meson-gx: Fix Ethernet MAC address unit name
  ARM: zynq: Fix refcount leak in zynq_early_slcr_init
  arm64: dts: qcom: qcs404: use symbol names for PCIe resets
  ARM: OMAP2+: Fix memory leak in realtime_counter_init()
  HID: asus: use spinlock to safely schedule workers
  HID: asus: use spinlock to protect concurrent accesses
  HID: asus: Remove check for same LED brightness on set
  Linux 5.4.234
  USB: core: Don't hold device lock while reading the "descriptors" sysfs file
  USB: serial: option: add support for VW/Skoda "Carstick LTE"
  dmaengine: sh: rcar-dmac: Check for error num after dma_set_max_seg_size
  vc_screen: don't clobber return value in vcs_read
  net: Remove WARN_ON_ONCE(sk->sk_forward_alloc) from sk_stream_kill_queues().
  bpf: bpf_fib_lookup should not return neigh in NUD_FAILED state
  HID: core: Fix deadloop in hid_apply_multiplier.
  neigh: make sure used and confirmed times are valid
  IB/hfi1: Assign npages earlier
  btrfs: send: limit number of clones and allocated memory size
  ACPI: NFIT: fix a potential deadlock during NFIT teardown
  ARM: dts: rockchip: add power-domains property to dp node on rk3288
  arm64: dts: rockchip: drop unused LED mode property from rk3328-roc-cc

 Conflicts:
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
	arch/arm/mm/dma-mapping.c
	drivers/clk/qcom/gcc-qcs404.c
	drivers/iommu/dma-iommu.c
	drivers/mtd/ubi/wl.c
	kernel/dma/direct.c

Change-Id: I5797c8cb2276354d851af215d431950eec734174
Signed-off-by: Srinivasarao Pathipati <quic_c_spathi@quicinc.com>
2023-07-26 14:21:25 +05:30

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/* The addr is used to check if the vma size fits */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);

	return false;
}
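
/*
 * A single huge zero page is shared system-wide and allocated lazily on
 * first use. huge_zero_refcount holds one reference per mm using the
 * page, plus one extra reference that only the shrinker may drop; the
 * cmpxchg()/retry loop below resolves two CPUs racing to install it.
 */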
static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
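
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a fault handler wanting the shared zero page would do
 *
 *	struct page *zp = mm_get_huge_zero_page(vma->vm_mm);
 *	if (!zp)
 *		return VM_FAULT_FALLBACK;
 *	...map zp into the faulting PMD...
 *
 * The mm-wide reference is dropped later via mm_put_huge_zero_page(),
 * which the kernel calls when the mm is torn down.
 */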
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}
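
/*
 * Free the huge zero page only if the 1 -> 0 transition of the refcount
 * succeeds, i.e. nothing but the shrinker's own reference is left.
 * huge_zero_pfn is reset to ~0UL so stale pfn comparisons cannot match
 * a recycled page.
 */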
static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
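
/*
 * The attributes below are exported under
 * /sys/kernel/mm/transparent_hugepage/. For example, from a root shell
 * the policy can be inspected and changed with:
 *
 *	cat /sys/kernel/mm/transparent_hugepage/enabled
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 */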
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
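
/*
 * The defrag knob selects how hard a huge page fault tries to compact
 * memory: "always" stalls the fault in direct reclaim/compaction,
 * "defer" only wakes kswapd/kcompactd in the background,
 * "defer+madvise" stalls just for MADV_HUGEPAGE regions, "madvise"
 * limits direct reclaim to those regions, and "never" disables defrag
 * at fault time entirely.
 */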
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
int err;
struct kobject *hugepage_kobj;
if (!has_transparent_hugepage()) {
transparent_hugepage_flags = 0;
return -EINVAL;
}
/*
* hugepages can't be allocated by the buddy allocator
*/
MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
/*
* we use page->mapping and page->index in second tail page
* as list_head: assuming THP order >= 2
*/
MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
err = hugepage_init_sysfs(&hugepage_kobj);
if (err)
goto err_sysfs;
err = khugepaged_init();
if (err)
goto err_slab;
err = register_shrinker(&huge_zero_page_shrinker);
if (err)
goto err_hzp_shrinker;
err = register_shrinker(&deferred_split_shrinker);
if (err)
goto err_split_shrinker;
/*
* By default disable transparent hugepages on smaller systems,
* where the extra memory used could hurt more than TLB overhead
* is likely to save. The admin can still enable it through /sys.
*/
if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
transparent_hugepage_flags = 0;
return 0;
}
err = start_stop_khugepaged();
if (err)
goto err_khugepaged;
return 0;
err_khugepaged:
unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
khugepaged_destroy();
err_slab:
hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
return err;
}
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
int ret = 0;
if (!str)
goto out;
if (!strcmp(str, "always")) {
set_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
} else if (!strcmp(str, "madvise")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
} else if (!strcmp(str, "never")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
}
out:
if (!ret)
pr_warn("transparent_hugepage= cannot parse, ignored\n");
return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pmd = pmd_mkwrite(pmd);
return pmd;
}
#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
if (memcg)
return &memcg->deferred_split_queue;
else
return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
return &pgdat->deferred_split_queue;
}
#endif
void prep_transhuge_page(struct page *page)
{
/*
* we use page->mapping and page->index in second tail page
* as list_head: assuming THP order >= 2
*/
INIT_LIST_HEAD(page_deferred_list(page));
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
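/*
 * Search for an area @size bytes larger than @len so that the result can
 * be shifted until (addr & (size - 1)) matches (off & (size - 1)), making
 * the mapping eligible for huge pages at the file's THP boundaries.
 * Returns 0 when no suitable area is found; the caller then retries
 * without the padding.
 */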
static unsigned long __thp_get_unmapped_area(struct file *filp,
unsigned long addr, unsigned long len,
loff_t off, unsigned long flags, unsigned long size)
{
loff_t off_end = off + len;
loff_t off_align = round_up(off, size);
unsigned long len_pad, ret;
if (off_end <= off_align || (off_end - off_align) < size)
return 0;
len_pad = len + size;
if (len_pad < len || (off + len_pad) < off)
return 0;
ret = current->mm->get_unmapped_area(filp, addr, len_pad,
off >> PAGE_SHIFT, flags);
/*
* The failure might be due to length padding. The caller will retry
* without the padding.
*/
if (IS_ERR_VALUE(ret))
return 0;
/*
* Do not try to align to THP boundary if allocation at the address
* hint succeeds.
*/
if (ret == addr)
return addr;
ret += (off - ret) & (size - 1);
return ret;
}
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
unsigned long ret;
loff_t off = (loff_t)pgoff << PAGE_SHIFT;
if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
goto out;
ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
if (ret)
return ret;
out:
return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
struct page *page, gfp_t gfp)
{
struct vm_area_struct *vma = vmf->vma;
struct mem_cgroup *memcg;
pgtable_t pgtable;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vm_fault_t ret = 0;
VM_BUG_ON_PAGE(!PageCompound(page), page);
if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
put_page(page);
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
}
pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable)) {
ret = VM_FAULT_OOM;
goto release;
}
clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
/*
* The memory barrier inside __SetPageUptodate makes sure that
* clear_huge_page writes become visible before the set_pmd_at()
* write.
*/
__SetPageUptodate(page);
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_none(*vmf->pmd))) {
goto unlock_release;
} else {
pmd_t entry;
ret = check_stable_address_space(vma->vm_mm);
if (ret)
goto unlock_release;
/* Deliver the page fault to userland */
if (userfaultfd_missing(vma)) {
vm_fault_t ret2;
spin_unlock(vmf->ptl);
mem_cgroup_cancel_charge(page, memcg, true);
put_page(page);
pte_free(vma->vm_mm, pgtable);
ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret2 & VM_FAULT_FALLBACK);
return ret2;
}
entry = mk_huge_pmd(page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
page_add_new_anon_rmap(page, vma, haddr, true);
mem_cgroup_commit_charge(page, memcg, false, true);
lru_cache_add_active_or_unevictable(page, vma);
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm_inc_nr_ptes(vma->vm_mm);
spin_unlock(vmf->ptl);
count_vm_event(THP_FAULT_ALLOC);
count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
}
return 0;
unlock_release:
spin_unlock(vmf->ptl);
release:
if (pgtable)
pte_free(vma->vm_mm, pgtable);
mem_cgroup_cancel_charge(page, memcg, true);
put_page(page);
return ret;
}
/*
* always: directly stall for all thp allocations
* defer: wake kswapd and fail if not immediately available
* defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
* fail if not immediately available
* madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
* available
* never: never stall for any thp allocation
*/
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
/* Always do synchronous compaction */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
/* Kick kcompactd and fail quickly */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
/* Synchronous compaction if madvised, otherwise kick kcompactd */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE_LIGHT |
(vma_madvised ? __GFP_DIRECT_RECLAIM :
__GFP_KSWAPD_RECLAIM);
/* Only do synchronous compaction if madvised */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE_LIGHT |
(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
return GFP_TRANSHUGE_LIGHT;
}
/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
struct page *zero_page)
{
pmd_t entry;
if (!pmd_none(*pmd))
return false;
entry = mk_pmd(zero_page, vma->vm_page_prot);
entry = pmd_mkhuge(entry);
if (pgtable)
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
mm_inc_nr_ptes(mm);
return true;
}
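/*
 * Anonymous huge page fault: read faults map the huge zero page when
 * that is enabled and permitted; otherwise a THP is allocated with a gfp
 * mask derived from the defrag setting. VM_FAULT_FALLBACK tells the
 * caller to retry the fault with small pages.
 */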
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
gfp_t gfp;
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
if (!transhuge_vma_suitable(vma, haddr))
return VM_FAULT_FALLBACK;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
return VM_FAULT_OOM;
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm) &&
transparent_hugepage_use_zero_page()) {
pgtable_t pgtable;
struct page *zero_page;
vm_fault_t ret;
pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable))
return VM_FAULT_OOM;
zero_page = mm_get_huge_zero_page(vma->vm_mm);
if (unlikely(!zero_page)) {
pte_free(vma->vm_mm, pgtable);
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
}
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
ret = 0;
if (pmd_none(*vmf->pmd)) {
ret = check_stable_address_space(vma->vm_mm);
if (ret) {
spin_unlock(vmf->ptl);
pte_free(vma->vm_mm, pgtable);
} else if (userfaultfd_missing(vma)) {
spin_unlock(vmf->ptl);
pte_free(vma->vm_mm, pgtable);
ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
} else {
set_huge_zero_page(pgtable, vma->vm_mm, vma,
haddr, vmf->pmd, zero_page);
spin_unlock(vmf->ptl);
}
} else {
spin_unlock(vmf->ptl);
pte_free(vma->vm_mm, pgtable);
}
return ret;
}
gfp = alloc_hugepage_direct_gfpmask(vma);
page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
}
prep_transhuge_page(page);
return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}
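/*
 * Install a huge pmd mapping @pfn at @addr. If an entry already exists
 * for the same pfn and this is a write, only the access/dirty flags are
 * upgraded. A pre-allocated @pgtable, when supplied, is deposited for
 * architectures that need it and freed again if it goes unused.
 */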
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
pgtable_t pgtable)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t entry;
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
if (!pmd_none(*pmd)) {
if (write) {
if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
goto out_unlock;
}
entry = pmd_mkyoung(*pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
update_mmu_cache_pmd(vma, addr, pmd);
}
goto out_unlock;
}
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
if (write) {
entry = pmd_mkyoung(pmd_mkdirty(entry));
entry = maybe_pmd_mkwrite(entry, vma);
}
if (pgtable) {
pgtable_trans_huge_deposit(mm, pmd, pgtable);
mm_inc_nr_ptes(mm);
pgtable = NULL;
}
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
out_unlock:
spin_unlock(ptl);
if (pgtable)
pte_free(mm, pgtable);
}
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
unsigned long addr = vmf->address & PMD_MASK;
struct vm_area_struct *vma = vmf->vma;
pgprot_t pgprot = vma->vm_page_prot;
pgtable_t pgtable = NULL;
/*
* If we had pmd_special, we could avoid all these restrictions,
* but we need to be consistent with PTEs and architectures that
* can't support a 'special' bit.
*/
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
!pfn_t_devmap(pfn));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
if (arch_needs_pgtable_deposit()) {
pgtable = pte_alloc_one(vma->vm_mm);
if (!pgtable)
return VM_FAULT_OOM;
}
track_pfn_insert(vma, &pgprot, pfn);
insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pud = pud_mkwrite(pud);
return pud;
}
static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
struct mm_struct *mm = vma->vm_mm;
pud_t entry;
spinlock_t *ptl;
ptl = pud_lock(mm, pud);
if (!pud_none(*pud)) {
if (write) {
if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
WARN_ON_ONCE(!is_huge_zero_pud(*pud));
goto out_unlock;
}
entry = pud_mkyoung(*pud);
entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
if (pudp_set_access_flags(vma, addr, pud, entry, 1))
update_mmu_cache_pud(vma, addr, pud);
}
goto out_unlock;
}
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
if (write) {
entry = pud_mkyoung(pud_mkdirty(entry));
entry = maybe_pud_mkwrite(entry, vma);
}
set_pud_at(mm, addr, pud, entry);
update_mmu_cache_pud(vma, addr, pud);
out_unlock:
spin_unlock(ptl);
}
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
unsigned long addr = vmf->address & PUD_MASK;
struct vm_area_struct *vma = vmf->vma;
pgprot_t pgprot = vma->vm_page_prot;
/*
* If we had pud_special, we could avoid all these restrictions,
* but we need to be consistent with PTEs and architectures that
* can't support a 'special' bit.
*/
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
!pfn_t_devmap(pfn));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
track_pfn_insert(vma, &pgprot, pfn);
insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags)
{
pmd_t _pmd;
_pmd = pmd_mkyoung(*pmd);
if (flags & FOLL_WRITE)
_pmd = pmd_mkdirty(_pmd);
if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
pmd, _pmd, flags & FOLL_WRITE))
update_mmu_cache_pmd(vma, addr, pmd);
}
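/*
 * GUP helper for device-mapped huge pmds: returns the target page with
 * an elevated refcount and its dev_pagemap pinned in *@pgmap. Callers
 * without FOLL_GET get ERR_PTR(-EEXIST), since device pages may only be
 * handed out when the caller manages the reference count.
 */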
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
unsigned long pfn = pmd_pfn(*pmd);
struct mm_struct *mm = vma->vm_mm;
struct page *page;
assert_spin_locked(pmd_lockptr(mm, pmd));
/*
* When we COW a devmap PMD entry, we split it into PTEs, so we should
* not be in this function with `flags & FOLL_COW` set.
*/
WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
if (flags & FOLL_WRITE && !pmd_write(*pmd))
return NULL;
if (pmd_present(*pmd) && pmd_devmap(*pmd))
/* pass */;
else
return NULL;
if (flags & FOLL_TOUCH)
touch_pmd(vma, addr, pmd, flags);
/*
* device mapped pages can only be returned if the
* caller will manage the page reference count.
*/
if (!(flags & FOLL_GET))
return ERR_PTR(-EEXIST);
pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
*pgmap = get_dev_pagemap(pfn, *pgmap);
if (!*pgmap)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
get_page(page);
return page;
}
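/*
 * fork()-time copy of a huge pmd. Anonymous THPs are shared
 * copy-on-write: the source pmd is write-protected and the copy maps the
 * same page with an extra reference and rmap. Huge zero pmds just take
 * another reference on the zero page, and pmd migration entries are
 * copied with write entries downgraded to read. Returns -EAGAIN when the
 * source pmd is no longer a huge pmd once the locks are taken.
 */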
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma)
{
spinlock_t *dst_ptl, *src_ptl;
struct page *src_page;
pmd_t pmd;
pgtable_t pgtable = NULL;
int ret = -ENOMEM;
/* Skip if it can be re-filled on fault */
if (!vma_is_anonymous(vma))
return 0;
pgtable = pte_alloc_one(dst_mm);
if (unlikely(!pgtable))
goto out;
dst_ptl = pmd_lock(dst_mm, dst_pmd);
src_ptl = pmd_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
ret = -EAGAIN;
pmd = *src_pmd;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
if (unlikely(is_swap_pmd(pmd))) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
VM_BUG_ON(!is_pmd_migration_entry(pmd));
if (is_write_migration_entry(entry)) {
make_migration_entry_read(&entry);
pmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*src_pmd))
pmd = pmd_swp_mksoft_dirty(pmd);
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
ret = 0;
goto out_unlock;
}
#endif
if (unlikely(!pmd_trans_huge(pmd))) {
pte_free(dst_mm, pgtable);
goto out_unlock;
}
/*
* When the page table lock is held, the huge zero pmd should not be
* under splitting, since we don't split the page itself, only the pmd
* into a page table.
*/
if (is_huge_zero_pmd(pmd)) {
struct page *zero_page;
/*
* get_huge_zero_page() will never allocate a new page here,
* since we already have a zero page to copy. It just takes a
* reference.
*/
zero_page = mm_get_huge_zero_page(dst_mm);
set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
zero_page);
ret = 0;
goto out_unlock;
}
src_page = pmd_page(pmd);
VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
get_page(src_page);
page_dup_rmap(src_page, true);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
pmdp_set_wrprotect(src_mm, addr, src_pmd);
pmd = pmd_mkold(pmd_wrprotect(pmd));
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
ret = 0;
out_unlock:
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
out:
return ret;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags)
{
pud_t _pud;
_pud = pud_mkyoung(*pud);
if (flags & FOLL_WRITE)
_pud = pud_mkdirty(_pud);
if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
pud, _pud, flags & FOLL_WRITE))
update_mmu_cache_pud(vma, addr, pud);
}
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
unsigned long pfn = pud_pfn(*pud);
struct mm_struct *mm = vma->vm_mm;
struct page *page;
assert_spin_locked(pud_lockptr(mm, pud));
if (flags & FOLL_WRITE && !pud_write(*pud))
return NULL;
if (pud_present(*pud) && pud_devmap(*pud))
/* pass */;
else
return NULL;
if (flags & FOLL_TOUCH)
touch_pud(vma, addr, pud, flags);
/*
* device mapped pages can only be returned if the
* caller will manage the page reference count.
*/
if (!(flags & FOLL_GET))
return ERR_PTR(-EEXIST);
pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
*pgmap = get_dev_pagemap(pfn, *pgmap);
if (!*pgmap)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
get_page(page);
return page;
}
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
struct vm_area_struct *vma)
{
spinlock_t *dst_ptl, *src_ptl;
pud_t pud;
int ret;
dst_ptl = pud_lock(dst_mm, dst_pud);
src_ptl = pud_lockptr(src_mm, src_pud);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
ret = -EAGAIN;
pud = *src_pud;
if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
goto out_unlock;
/*
* When the page table lock is held, the huge zero pud should not be
* under splitting, since we don't split the page itself, only the pud
* into a page table.
*/
if (is_huge_zero_pud(pud)) {
/* No huge zero pud yet */
}
pudp_set_wrprotect(src_mm, addr, src_pud);
pud = pud_mkold(pud_wrprotect(pud));
set_pud_at(dst_mm, addr, dst_pud, pud);
ret = 0;
out_unlock:
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
return ret;
}
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
pud_t entry;
unsigned long haddr;
bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
if (unlikely(!pud_same(*vmf->pud, orig_pud)))
goto unlock;
entry = pud_mkyoung(orig_pud);
if (write)
entry = pud_mkdirty(entry);
haddr = vmf->address & HPAGE_PUD_MASK;
if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
unlock:
spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
pmd_t entry;
unsigned long haddr;
bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto unlock;
entry = pmd_mkyoung(orig_pmd);
if (write)
entry = pmd_mkdirty(entry);
haddr = vmf->address & HPAGE_PMD_MASK;
if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
unlock:
spin_unlock(vmf->ptl);
}
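/*
 * COW fallback when a replacement THP cannot be allocated: copy the huge
 * page into HPAGE_PMD_NR individually allocated and charged small pages,
 * then replace the huge pmd with a page table mapping the copies.
 */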
static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
pmd_t orig_pmd, struct page *page)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
struct mem_cgroup *memcg;
pgtable_t pgtable;
pmd_t _pmd;
int i;
vm_fault_t ret = 0;
struct page **pages;
struct mmu_notifier_range range;
pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
GFP_KERNEL);
if (unlikely(!pages)) {
ret |= VM_FAULT_OOM;
goto out;
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
vmf->address, page_to_nid(page));
if (unlikely(!pages[i] ||
mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
GFP_KERNEL, &memcg, false))) {
if (pages[i])
put_page(pages[i]);
while (--i >= 0) {
memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0);
mem_cgroup_cancel_charge(pages[i], memcg,
false);
put_page(pages[i]);
}
kfree(pages);
ret |= VM_FAULT_OOM;
goto out;
}
set_page_private(pages[i], (unsigned long)memcg);
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
copy_user_highpage(pages[i], page + i,
haddr + PAGE_SIZE * i, vma);
__SetPageUptodate(pages[i]);
cond_resched();
}
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
haddr, haddr + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto out_free_pages;
VM_BUG_ON_PAGE(!PageHead(page), page);
/*
* Leave the pmd empty until the ptes are filled. Note that we must
* notify here, as a concurrent CPU thread might write to the new page
* before the call to mmu_notifier_invalidate_range_end() happens, which
* can lead to a device seeing the memory writes in a different order
* than the CPU.
*
* See Documentation/vm/mmu_notifier.rst
*/
pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
pmd_populate(vma->vm_mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
pte_t entry;
entry = mk_pte(pages[i], vmf->vma_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0);
page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
mem_cgroup_commit_charge(pages[i], memcg, false, false);
lru_cache_add_active_or_unevictable(pages[i], vma);
vmf->pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte_none(*vmf->pte));
set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
pte_unmap(vmf->pte);
}
kfree(pages);
smp_wmb(); /* make pte visible before pmd */
pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
page_remove_rmap(page, true);
spin_unlock(vmf->ptl);
/*
* No need to double call mmu_notifier->invalidate_range() callback as
* the above pmdp_huge_clear_flush_notify() did already call it.
*/
mmu_notifier_invalidate_range_only_end(&range);
ret |= VM_FAULT_WRITE;
put_page(page);
out:
return ret;
out_free_pages:
spin_unlock(vmf->ptl);
mmu_notifier_invalidate_range_end(&range);
for (i = 0; i < HPAGE_PMD_NR; i++) {
memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0);
mem_cgroup_cancel_charge(pages[i], memcg, false);
put_page(pages[i]);
}
kfree(pages);
goto out;
}
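/*
 * Write-protection fault on a huge pmd. If we are the only mapper, the
 * page is reused in place; otherwise a new THP is allocated and copied
 * into. When allocation or charging fails, fall back to splitting the
 * pmd or to small-page copies via do_huge_pmd_wp_page_fallback().
 */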
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL, *new_page;
struct mem_cgroup *memcg;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
struct mmu_notifier_range range;
gfp_t huge_gfp; /* for allocation and charge */
vm_fault_t ret = 0;
vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
VM_BUG_ON_VMA(!vma->anon_vma, vma);
if (is_huge_zero_pmd(orig_pmd))
goto alloc;
spin_lock(vmf->ptl);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto out_unlock;
page = pmd_page(orig_pmd);
VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
/*
* We can only reuse the page if nobody else maps the huge page or any
* part of it.
*/
if (!trylock_page(page)) {
get_page(page);
spin_unlock(vmf->ptl);
lock_page(page);
spin_lock(vmf->ptl);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
unlock_page(page);
put_page(page);
goto out_unlock;
}
put_page(page);
}
if (reuse_swap_page(page, NULL)) {
pmd_t entry;
entry = pmd_mkyoung(orig_pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
ret |= VM_FAULT_WRITE;
unlock_page(page);
goto out_unlock;
}
unlock_page(page);
get_page(page);
spin_unlock(vmf->ptl);
alloc:
if (__transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
huge_gfp = alloc_hugepage_direct_gfpmask(vma);
new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
} else
new_page = NULL;
if (likely(new_page)) {
prep_transhuge_page(new_page);
} else {
if (!page) {
split_huge_pmd(vma, vmf->pmd, vmf->address);
ret |= VM_FAULT_FALLBACK;
} else {
ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
if (ret & VM_FAULT_OOM) {
split_huge_pmd(vma, vmf->pmd, vmf->address);
ret |= VM_FAULT_FALLBACK;
}
put_page(page);
}
count_vm_event(THP_FAULT_FALLBACK);
goto out;
}
if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
huge_gfp, &memcg, true))) {
put_page(new_page);
split_huge_pmd(vma, vmf->pmd, vmf->address);
if (page)
put_page(page);
ret |= VM_FAULT_FALLBACK;
count_vm_event(THP_FAULT_FALLBACK);
goto out;
}
count_vm_event(THP_FAULT_ALLOC);
count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
if (!page)
clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
else
copy_user_huge_page(new_page, page, vmf->address,
vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
haddr, haddr + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
spin_lock(vmf->ptl);
if (page)
put_page(page);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
spin_unlock(vmf->ptl);
mem_cgroup_cancel_charge(new_page, memcg, true);
put_page(new_page);
goto out_mn;
} else {
pmd_t entry;
entry = mk_huge_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
page_add_new_anon_rmap(new_page, vma, haddr, true);
mem_cgroup_commit_charge(new_page, memcg, false, true);
lru_cache_add_active_or_unevictable(new_page, vma);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
if (!page) {
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
} else {
VM_BUG_ON_PAGE(!PageHead(page), page);
page_remove_rmap(page, true);
put_page(page);
}
ret |= VM_FAULT_WRITE;
}
spin_unlock(vmf->ptl);
out_mn:
/*
* No need to double call mmu_notifier->invalidate_range() callback as
* the above pmdp_huge_clear_flush_notify() did already call it.
*/
mmu_notifier_invalidate_range_only_end(&range);
out:
return ret;
out_unlock:
spin_unlock(vmf->ptl);
return ret;
}
/*
* FOLL_FORCE can write to even unwritable pmd's, but only
* after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
return pmd_write(pmd) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
unsigned int flags)
{
struct mm_struct *mm = vma->vm_mm;
struct page *page = NULL;
assert_spin_locked(pmd_lockptr(mm, pmd));
if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
goto out;
/* Avoid dumping huge zero page */
if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
return ERR_PTR(-EFAULT);
/* Full NUMA hinting faults to serialise migration in fault paths */
if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
goto out;
page = pmd_page(*pmd);
VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
if (flags & FOLL_TOUCH)
touch_pmd(vma, addr, pmd, flags);
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/*
* We don't mlock() pte-mapped THPs. This way we can avoid
* leaking mlocked pages into non-VM_LOCKED VMAs.
*
* For anon THP:
*
* In most cases the pmd is the only mapping of the page as we
* break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
* writable private mappings in populate_vma_page_range().
*
* The only scenario in which we have the page shared here is if we
* are mlocking a read-only mapping shared over fork(). We skip
* mlocking such pages.
*
* For file THP:
*
* We can expect PageDoubleMap() to be stable under page lock:
* for file pages we set it in page_add_file_rmap(), which
* requires page to be locked.
*/
if (PageAnon(page) && compound_mapcount(page) != 1)
goto skip_mlock;
if (PageDoubleMap(page) || !page->mapping)
goto skip_mlock;
if (!trylock_page(page))
goto skip_mlock;
lru_add_drain();
if (page->mapping && !PageDoubleMap(page))
mlock_vma_page(page);
unlock_page(page);
}
skip_mlock:
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
if (flags & FOLL_GET)
get_page(page);
out:
return page;
}
/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
{
struct vm_area_struct *vma = vmf->vma;
struct anon_vma *anon_vma = NULL;
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
int target_nid, last_cpupid = -1;
bool page_locked;
bool migrated = false;
bool was_writable;
int flags = 0;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(pmd, *vmf->pmd)))
goto out_unlock;
/*
* If there are potential migrations, wait for completion and retry
* without disrupting NUMA hinting information. Do not relock and
* check_same as the page may no longer be mapped.
*/
if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
page = pmd_page(*vmf->pmd);
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(vmf->ptl);
put_and_wait_on_page_locked(page);
goto out;
}
page = pmd_page(pmd);
BUG_ON(is_huge_zero_page(page));
page_nid = page_to_nid(page);
last_cpupid = page_cpupid_last(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
if (page_nid == this_nid) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
flags |= TNF_FAULT_LOCAL;
}
/* See similar comment in do_numa_page for explanation */
if (!pmd_savedwrite(pmd))
flags |= TNF_NO_GROUP;
/*
* Acquire the page lock to serialise THP migrations but avoid dropping
* page_table_lock if at all possible
*/
page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
if (target_nid == NUMA_NO_NODE) {
/* If the page was locked, there are no parallel migrations */
if (page_locked)
goto clear_pmdnuma;
}
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
page_nid = NUMA_NO_NODE;
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(vmf->ptl);
put_and_wait_on_page_locked(page);
goto out;
}
/*
* Page is misplaced. Page lock serialises migrations. Acquire anon_vma
* to serialise splits.
*/
get_page(page);
spin_unlock(vmf->ptl);
anon_vma = page_lock_anon_vma_read(page, NULL);
/* Confirm the PMD did not change while page_table_lock was released */
spin_lock(vmf->ptl);
if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
unlock_page(page);
put_page(page);
page_nid = NUMA_NO_NODE;
goto out_unlock;
}
/* Bail if we fail to protect against THP splits for any reason */
if (unlikely(!anon_vma)) {
put_page(page);
page_nid = NUMA_NO_NODE;
goto clear_pmdnuma;
}
/*
* Since we took the NUMA fault, we must have observed the !accessible
* bit. Make sure all other CPUs agree with that, to avoid them
* modifying the page we're about to migrate.
*
* Must be done under PTL such that we'll observe the relevant
* inc_tlb_flush_pending().
*
* We are not sure whether a pending tlb flush here is for a huge page
* mapping or not. Hence, use the tlb range variant.
*/
if (mm_tlb_flush_pending(vma->vm_mm)) {
flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
/*
* change_huge_pmd() released the pmd lock before
* invalidating the secondary MMUs sharing the primary
* MMU pagetables (with ->invalidate_range()). The
* mmu_notifier_invalidate_range_end() (which
* internally calls ->invalidate_range()) in
* change_pmd_range() will run after us, so we can't
* rely on it here and we need an explicit invalidate.
*/
mmu_notifier_invalidate_range(vma->vm_mm, haddr,
haddr + HPAGE_PMD_SIZE);
}
/*
* Migrate the THP to the requested node, returns with page unlocked
* and access rights restored.
*/
spin_unlock(vmf->ptl);
migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
vmf->pmd, pmd, vmf->address, page, target_nid);
if (migrated) {
flags |= TNF_MIGRATED;
page_nid = target_nid;
} else
flags |= TNF_MIGRATE_FAIL;
goto out;
clear_pmdnuma:
BUG_ON(!PageLocked(page));
was_writable = pmd_savedwrite(pmd);
pmd = pmd_modify(pmd, vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
if (was_writable)
pmd = pmd_mkwrite(pmd);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
unlock_page(page);
out_unlock:
spin_unlock(vmf->ptl);
out:
if (anon_vma)
page_unlock_anon_vma_read(anon_vma);
if (page_nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
flags);
return 0;
}
/*
* Return true if we do MADV_FREE successfully on the entire pmd page.
* Otherwise, return false.
*/
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next)
{
spinlock_t *ptl;
pmd_t orig_pmd;
struct page *page;
struct mm_struct *mm = tlb->mm;
bool ret = false;
tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
ptl = pmd_trans_huge_lock(pmd, vma);
if (!ptl)
goto out_unlocked;
orig_pmd = *pmd;
if (is_huge_zero_pmd(orig_pmd))
goto out;
if (unlikely(!pmd_present(orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(orig_pmd));
goto out;
}
page = pmd_page(orig_pmd);
/*
* If other processes are mapping this page, we can't discard the page
* unless they all do MADV_FREE, so let's skip the page.
*/
if (total_mapcount(page) != 1)
goto out;
if (!trylock_page(page))
goto out;
/*
* If the user wants to discard only part of the THP's pages, split it
* so MADV_FREE will deactivate just those pages.
*/
if (next - addr != HPAGE_PMD_SIZE) {
get_page(page);
spin_unlock(ptl);
split_huge_page(page);
unlock_page(page);
put_page(page);
goto out_unlocked;
}
if (PageDirty(page))
ClearPageDirty(page);
unlock_page(page);
if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
pmdp_invalidate(vma, addr, pmd);
orig_pmd = pmd_mkold(orig_pmd);
orig_pmd = pmd_mkclean(orig_pmd);
set_pmd_at(mm, addr, pmd, orig_pmd);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
}
mark_page_lazyfree(page);
ret = true;
out:
spin_unlock(ptl);
out_unlocked:
return ret;
}
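/* Withdraw the page table deposited for a huge pmd and free it. */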
static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
pgtable_t pgtable;
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pte_free(mm, pgtable);
mm_dec_nr_ptes(mm);
}
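/*
 * Zap one huge pmd: clear the entry, release any deposited page table,
 * adjust the rss counters and hand the page to the mmu_gather for TLB
 * flushing and freeing where needed. Returns 0 when the pmd was not
 * huge, in which case the caller falls through to the pte path.
 */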
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr)
{
pmd_t orig_pmd;
spinlock_t *ptl;
tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
ptl = __pmd_trans_huge_lock(pmd, vma);
if (!ptl)
return 0;
/*
* For architectures like ppc64 we look at deposited pgtable
* when calling pmdp_huge_get_and_clear. So do the
* pgtable_trans_huge_withdraw after finishing pmdp related
* operations.
*/
orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
tlb->fullmm);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
if (vma_is_dax(vma)) {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
if (is_huge_zero_pmd(orig_pmd))
tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
} else if (is_huge_zero_pmd(orig_pmd)) {
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
} else {
struct page *page = NULL;
int flush_needed = 1;
if (pmd_present(orig_pmd)) {
page = pmd_page(orig_pmd);
page_remove_rmap(page, true);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(!PageHead(page), page);
} else if (thp_migration_supported()) {
swp_entry_t entry;
VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
page = pfn_to_page(swp_offset(entry));
flush_needed = 0;
} else
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
if (PageAnon(page)) {
zap_deposited_table(tlb->mm, pmd);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
} else {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
}
spin_unlock(ptl);
if (flush_needed)
tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
}
return 1;
}
#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
spinlock_t *old_pmd_ptl,
struct vm_area_struct *vma)
{
/*
* With split pmd lock we also need to move preallocated
* PTE page table if new_pmd is on different PMD page table.
*
* We also don't deposit and withdraw tables for file pages.
*/
return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
if (unlikely(is_pmd_migration_entry(pmd)))
pmd = pmd_swp_mksoft_dirty(pmd);
else if (pmd_present(pmd))
pmd = pmd_mksoft_dirty(pmd);
#endif
return pmd;
}
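/*
 * mremap() a huge pmd: clear it at the old address and re-install it,
 * with soft-dirty propagated, at the new one, moving the deposited page
 * table along when pmd_move_must_withdraw() requires it. Returns false
 * on misalignment or lock failure so the caller can fall back to moving
 * individual ptes.
 */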
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
pmd_t pmd;
struct mm_struct *mm = vma->vm_mm;
bool force_flush = false;
if ((old_addr & ~HPAGE_PMD_MASK) ||
(new_addr & ~HPAGE_PMD_MASK) ||
old_end - old_addr < HPAGE_PMD_SIZE)
return false;
/*
* The destination pmd shouldn't be established, free_pgtables()
* should have released it.
*/
if (WARN_ON(!pmd_none(*new_pmd))) {
VM_BUG_ON(pmd_trans_huge(*new_pmd));
return false;
}
/*
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_sem prevents deadlock.
*/
old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
if (old_ptl) {
new_ptl = pmd_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
if (pmd_present(pmd))
force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
pgtable_t pgtable;
pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
}
pmd = move_soft_dirty_pmd(pmd);
set_pmd_at(mm, new_addr, new_pmd, pmd);
if (force_flush)
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}
return false;
}
/*
* Returns
* - 0 if PMD could not be locked
* - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
* - HPAGE_PMD_NR if protections changed and TLB flush necessary
*/
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot, int prot_numa)
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
pmd_t entry;
bool preserve_write;
int ret;
ptl = __pmd_trans_huge_lock(pmd, vma);
if (!ptl)
return 0;
preserve_write = prot_numa && pmd_write(*pmd);
ret = 1;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
if (is_swap_pmd(*pmd)) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
VM_BUG_ON(!is_pmd_migration_entry(*pmd));
if (is_write_migration_entry(entry)) {
pmd_t newpmd;
/*
* A protection check is difficult so
* just be safe and disable write
*/
make_migration_entry_read(&entry);
newpmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*pmd))
newpmd = pmd_swp_mksoft_dirty(newpmd);
set_pmd_at(mm, addr, pmd, newpmd);
}
goto unlock;
}
#endif
/*
* Avoid trapping faults against the zero page. The read-only
* data is likely to be read-cached on the local CPU and
* local/remote hits to the zero page are not interesting.
*/
if (prot_numa && is_huge_zero_pmd(*pmd))
goto unlock;
if (prot_numa && pmd_protnone(*pmd))
goto unlock;
/*
* In case prot_numa, we are under down_read(mmap_sem). It's critical
* to not clear pmd intermittently to avoid race with MADV_DONTNEED
* which is also under down_read(mmap_sem):
*
* CPU0: CPU1:
* change_huge_pmd(prot_numa=1)
* pmdp_huge_get_and_clear_notify()
* madvise_dontneed()
* zap_pmd_range()
* pmd_trans_huge(*pmd) == 0 (without ptl)
* // skip the pmd
* set_pmd_at();
* // pmd is re-established
*
* The race makes MADV_DONTNEED miss the huge pmd and fail to clear it,
* which may break userspace.
*
* pmdp_invalidate() is required to make sure we don't miss
* dirty/young flags set by hardware.
*/
entry = pmdp_invalidate(vma, addr, pmd);
entry = pmd_modify(entry, newprot);
if (preserve_write)
entry = pmd_mk_savedwrite(entry);
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
spin_unlock(ptl);
return ret;
}
/*
* Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
*
* Note that if it returns a page table lock pointer, this routine returns
* without unlocking the page table lock, so callers must unlock it.
*/
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
spinlock_t *ptl;
ptl = pmd_lock(vma->vm_mm, pmd);
if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
pmd_devmap(*pmd)))
return ptl;
spin_unlock(ptl);
return NULL;
}
/*
* Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
*
* Note that if it returns a page table lock pointer, this routine returns
* without unlocking the page table lock, so callers must unlock it.
*/
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
spinlock_t *ptl;
ptl = pud_lock(vma->vm_mm, pud);
if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
return ptl;
spin_unlock(ptl);
return NULL;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
pud_t *pud, unsigned long addr)
{
spinlock_t *ptl;
ptl = __pud_trans_huge_lock(pud, vma);
if (!ptl)
return 0;
/*
* For architectures like ppc64 we look at deposited pgtable
* when calling pudp_huge_get_and_clear. So do the
* pgtable_trans_huge_withdraw after finishing pudp related
* operations.
*/
pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
if (vma_is_dax(vma)) {
spin_unlock(ptl);
/* No zero page support yet */
} else {
/* No support for anonymous PUD pages yet */
BUG();
}
return 1;
}
static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
unsigned long haddr)
{
VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
count_vm_event(THP_SPLIT_PUD);
pudp_huge_clear_flush_notify(vma, haddr, pud);
}
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address)
{
spinlock_t *ptl;
struct mmu_notifier_range range;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
address & HPAGE_PUD_MASK,
(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
mmu_notifier_invalidate_range_start(&range);
ptl = pud_lock(vma->vm_mm, pud);
if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
goto out;
__split_huge_pud_locked(vma, pud, range.start);
out:
spin_unlock(ptl);
/*
* No need to double call mmu_notifier->invalidate_range() callback as
* the above pudp_huge_clear_flush_notify() did already call it.
*/
mmu_notifier_invalidate_range_only_end(&range);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
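/*
 * Split a huge zero-page pmd into a page table of write-protected ptes
 * that all point at the small zero page.
 */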
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
unsigned long haddr, pmd_t *pmd)
{
struct mm_struct *mm = vma->vm_mm;
pgtable_t pgtable;
pmd_t _pmd;
int i;
/*
* Leave the pmd empty until the ptes are filled. Note that it is fine to
* delay the notification until mmu_notifier_invalidate_range_end(), as
* we are replacing a write-protected huge zero page with write-protected
* small zero pages.
*
* See Documentation/vm/mmu_notifier.rst
*/
pmdp_huge_clear_flush(vma, haddr, pmd);
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
pte_t *pte, entry;
entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
entry = pte_mkspecial(entry);
pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
pte_unmap(pte);
}
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
}
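/*
 * Split a huge pmd in place, with the pmd lock held. File-backed pmds
 * are simply zapped and re-faulted later as small pages; the huge zero
 * pmd is handed to __split_huge_zero_page_pmd(); anonymous pmds are
 * rewritten as a page table of ptes with matching permissions. With
 * @freeze set (or for pmd migration entries) the ptes are installed as
 * migration entries instead, keeping the page unmapped for the caller's
 * upcoming page split.
 */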
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long haddr, bool freeze)
{
struct mm_struct *mm = vma->vm_mm;
struct page *page;
pgtable_t pgtable;
pmd_t old_pmd, _pmd;
bool young, write, soft_dirty, pmd_migration = false;
unsigned long addr;
int i;
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
&& !pmd_devmap(*pmd));
count_vm_event(THP_SPLIT_PMD);
if (!vma_is_anonymous(vma)) {
old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
/*
* We are going to unmap this huge page. So
* just go ahead and zap it
*/
if (arch_needs_pgtable_deposit())
zap_deposited_table(mm, pmd);
if (vma_is_dax(vma))
return;
if (unlikely(is_pmd_migration_entry(old_pmd))) {
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
page = migration_entry_to_page(entry);
} else {
page = pmd_page(old_pmd);
if (!PageDirty(page) && pmd_dirty(old_pmd))
set_page_dirty(page);
if (!PageReferenced(page) && pmd_young(old_pmd))
SetPageReferenced(page);
page_remove_rmap(page, true);
put_page(page);
}
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
}
if (is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate the secondary mmu by calling
* mmu_notifier_invalidate_range()? See the comments below inside
* __split_huge_pmd().
*
* We are going from a write-protected huge zero page to write-protected
* small zero pages, so it does not seem useful to invalidate the
* secondary mmu at this time.
*/
return __split_huge_zero_page_pmd(vma, haddr, pmd);
}
/*
* Up to this point the pmd is present and huge and userland has the
* whole access to the hugepage during the split (which happens in
* place). If we overwrite the pmd with the not-huge version pointing
* to the pte here (which of course we could if all CPUs were bug
* free), userland could trigger a small page size TLB miss on the
* small sized TLB while the hugepage TLB entry is still established in
* the huge TLB. Some CPUs don't like that.
* See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
* 383 on page 93. Intel should be safe but also warns that it's only
* safe if the permission and cache attributes of the two entries loaded
* in the two TLBs are identical (which should be the case here).
* But it is generally safer to never allow small and huge TLB entries
* for the same virtual address to be loaded simultaneously. So instead
* of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
* current pmd notpresent (atomically because here the pmd_trans_huge
* must remain set at all times on the pmd until the split is complete
* for this pmd), then we flush the SMP TLB and finally we write the
* non-huge version of the pmd entry with pmd_populate.
*/
old_pmd = pmdp_invalidate(vma, haddr, pmd);
pmd_migration = is_pmd_migration_entry(old_pmd);
if (unlikely(pmd_migration)) {
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
page = pfn_to_page(swp_offset(entry));
write = is_write_migration_entry(entry);
young = false;
soft_dirty = pmd_swp_soft_dirty(old_pmd);
} else {
page = pmd_page(old_pmd);
if (pmd_dirty(old_pmd))
SetPageDirty(page);
write = pmd_write(old_pmd);
young = pmd_young(old_pmd);
soft_dirty = pmd_soft_dirty(old_pmd);
}
VM_BUG_ON_PAGE(!page_count(page), page);
page_ref_add(page, HPAGE_PMD_NR - 1);
/*
* Withdraw the table only after we mark the pmd entry invalid.
* This is critical for some architectures (Power).
*/
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
pte_t entry, *pte;
/*
* Note that NUMA hinting access restrictions are not
* transferred to avoid any possibility of altering
* permissions across VMAs.
*/
if (freeze || pmd_migration) {
swp_entry_t swp_entry;
swp_entry = make_migration_entry(page + i, write);
entry = swp_entry_to_pte(swp_entry);
if (soft_dirty)
entry = pte_swp_mksoft_dirty(entry);
} else {
entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
entry = maybe_mkwrite(entry, vma->vm_flags);
if (!write)
entry = pte_wrprotect(entry);
if (!young)
entry = pte_mkold(entry);
if (soft_dirty)
entry = pte_mksoft_dirty(entry);
}
pte = pte_offset_map(&_pmd, addr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, entry);
if (!pmd_migration)
atomic_inc(&page[i]._mapcount);
pte_unmap(pte);
}
if (!pmd_migration) {
/*
* Set PG_double_map before dropping compound_mapcount to avoid
* false-negative page_mapped().
*/
if (compound_mapcount(page) > 1 &&
!TestSetPageDoubleMap(page)) {
for (i = 0; i < HPAGE_PMD_NR; i++)
atomic_inc(&page[i]._mapcount);
}
lock_page_memcg(page);
if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
/* Last compound_mapcount is gone. */
__dec_lruvec_page_state(page, NR_ANON_THPS);
if (TestClearPageDoubleMap(page)) {
/* No need in mapcount reference anymore */
for (i = 0; i < HPAGE_PMD_NR; i++)
atomic_dec(&page[i]._mapcount);
}
}
unlock_page_memcg(page);
}
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
if (freeze) {
for (i = 0; i < HPAGE_PMD_NR; i++) {
page_remove_rmap(page + i, false);
put_page(page + i);
}
}
}
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct page *page)
{
spinlock_t *ptl;
struct mmu_notifier_range range;
bool do_unlock_page = false;
pmd_t _pmd;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
address & HPAGE_PMD_MASK,
(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
ptl = pmd_lock(vma->vm_mm, pmd);
/*
* If the caller asks us to set up migration entries, we need a page to
* check the pmd against; otherwise we can end up replacing the wrong page.
*/
VM_BUG_ON(freeze && !page);
if (page) {
VM_WARN_ON_ONCE(!PageLocked(page));
if (page != pmd_page(*pmd))
goto out;
}
repeat:
if (pmd_trans_huge(*pmd)) {
if (!page) {
page = pmd_page(*pmd);
/*
* An anonymous page must be locked, to ensure that a
* concurrent reuse_swap_page() sees stable mapcount;
* but reuse_swap_page() is not used on shmem or file,
* and page lock must not be taken when zap_pmd_range()
* calls __split_huge_pmd() while i_mmap_lock is held.
*/
if (PageAnon(page)) {
if (unlikely(!trylock_page(page))) {
get_page(page);
_pmd = *pmd;
spin_unlock(ptl);
lock_page(page);
spin_lock(ptl);
if (unlikely(!pmd_same(*pmd, _pmd))) {
unlock_page(page);
put_page(page);
page = NULL;
goto repeat;
}
put_page(page);
}
do_unlock_page = true;
}
}
if (PageMlocked(page))
clear_page_mlock(page);
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
goto out;
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
spin_unlock(ptl);
if (do_unlock_page)
unlock_page(page);
/*
* No need to double call mmu_notifier->invalidate_range() callback.
* There are 3 cases to consider inside __split_huge_pmd_locked():
* 1) pmdp_huge_clear_flush_notify() already calls invalidate_range(),
* which is the obvious case.
* 2) __split_huge_zero_page_pmd() installs read-only zero-page ptes; any
* write fault will trigger a flush_notify before pointing to a new page
* (it is fine if the secondary mmu keeps pointing to the old zero
* page in the meantime).
* 3) Splitting a huge pmd into ptes pointing to the same page: no need
* to invalidate the secondary tlb entries, they are all still valid;
* any further changes to individual ptes will notify.
*/
mmu_notifier_invalidate_range_only_end(&range);
}
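/* Walk the page tables down to the pmd covering @address and split it. */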
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze, struct page *page)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(vma->vm_mm, address);
if (!pgd_present(*pgd))
return;
p4d = p4d_offset(pgd, address);
if (!p4d_present(*p4d))
return;
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
return;
pmd = pmd_offset(pud, address);
__split_huge_pmd(vma, pmd, address, freeze, page);
}
void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
/*
* If the new start address isn't hpage aligned and it could
* previously contain a hugepage: check if we need to split
* a huge pmd.
*/
if (start & ~HPAGE_PMD_MASK &&
(start & HPAGE_PMD_MASK) >= vma->vm_start &&
(start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
split_huge_pmd_address(vma, start, false, NULL);
/*
* If the new end address isn't hpage aligned and it could
* previously contain a hugepage: check if we need to split
* a huge pmd.
*/
if (end & ~HPAGE_PMD_MASK &&
(end & HPAGE_PMD_MASK) >= vma->vm_start &&
(end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
split_huge_pmd_address(vma, end, false, NULL);
/*
* If we're also updating vma->vm_next->vm_start: if the new
* vm_next->vm_start isn't page aligned and it could previously
* contain a hugepage, check if we need to split a huge pmd.
*/
if (adjust_next > 0) {
struct vm_area_struct *next = vma->vm_next;
unsigned long nstart = next->vm_start;
nstart += adjust_next << PAGE_SHIFT;
if (nstart & ~HPAGE_PMD_MASK &&
(nstart & HPAGE_PMD_MASK) >= next->vm_start &&
(nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
split_huge_pmd_address(next, nstart, false, NULL);
}
}
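/*
 * Unmap every mapping of a THP before splitting it. Anonymous pages are
 * frozen behind migration entries (TTU_SPLIT_FREEZE) so that remap_page()
 * can re-establish the mappings once the split is complete.
 */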
static void unmap_page(struct page *page)
{
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC;
VM_BUG_ON_PAGE(!PageHead(page), page);
if (PageAnon(page))
ttu_flags |= TTU_SPLIT_FREEZE;
try_to_unmap(page, ttu_flags, NULL);
VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}
static void remap_page(struct page *page)
{
int i;
if (PageTransHuge(page)) {
remove_migration_ptes(page, page, true);
} else {
for (i = 0; i < HPAGE_PMD_NR; i++)
remove_migration_ptes(page + i, page + i, true);
}
}
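/*
 * Turn tail page number @tail of the compound page @head into an
 * independent page: clone the relevant flags, mapping and index, clear
 * PageTail and unfreeze the tail's refcount.
 */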
static void __split_huge_page_tail(struct page *head, int tail,
struct lruvec *lruvec, struct list_head *list)
{
struct page *page_tail = head + tail;
VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
/*
* Clone page flags before unfreezing refcount.
*
* A flags change might follow a successful get_page_unless_zero(),
* for example lock_page(), which sets PG_waiters.
*/
page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
page_tail->flags |= (head->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_swapcache) |
(1L << PG_mlocked) |
(1L << PG_uptodate) |
(1L << PG_active) |
(1L << PG_workingset) |
(1L << PG_locked) |
(1L << PG_unevictable) |
(1L << PG_dirty)));
/* ->mapping in first tail page is compound_mapcount */
VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
page_tail);
page_tail->mapping = head->mapping;
page_tail->index = head->index + tail;
/* Page flags must be visible before we make the page non-compound. */
smp_wmb();
/*
* Clear PageTail before unfreezing page refcount.
*
* A put_page() might follow a successful get_page_unless_zero(),
* and it needs a correct compound_head().
*/
clear_compound_head(page_tail);
/* Finally unfreeze refcount. Additional reference from page cache. */
page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
PageSwapCache(head)));
if (page_is_young(head))
set_page_young(page_tail);
if (page_is_idle(head))
set_page_idle(page_tail);
page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
/*
* always add to the tail because some iterators expect new
* pages to show up after the currently processed elements, e.g.
* migrate_pages().
*/
lru_add_page_tail(head, page_tail, lruvec, list);
}
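/*
 * Final phase of a THP split, entered with the page unmapped and its
 * refcount frozen: carve the compound page into HPAGE_PMD_NR independent
 * pages, fix up the page cache or swap cache slots, restore the
 * migration ptes and drop the references held for the split.
 */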
static void __split_huge_page(struct page *page, struct list_head *list,
pgoff_t end, unsigned long flags)
{
struct page *head = compound_head(page);
pg_data_t *pgdat = page_pgdat(head);
struct lruvec *lruvec;
struct address_space *swap_cache = NULL;
unsigned long offset = 0;
int i;
lruvec = mem_cgroup_page_lruvec(head, pgdat);
	/* Complete the memcg work before adding pages to the LRU */
mem_cgroup_split_huge_fixup(head);
if (PageAnon(head) && PageSwapCache(head)) {
swp_entry_t entry = { .val = page_private(head) };
offset = swp_offset(entry);
swap_cache = swap_address_space(entry);
xa_lock(&swap_cache->i_pages);
}
for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
__split_huge_page_tail(head, i, lruvec, list);
/* Some pages can be beyond i_size: drop them from page cache */
if (head[i].index >= end) {
ClearPageDirty(head + i);
__delete_from_page_cache(head + i, NULL);
if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
shmem_uncharge(head->mapping->host, 1);
put_page(head + i);
} else if (!PageAnon(page)) {
__xa_store(&head->mapping->i_pages, head[i].index,
head + i, 0);
} else if (swap_cache) {
__xa_store(&swap_cache->i_pages, offset + i,
head + i, 0);
}
}
ClearPageCompound(head);
split_page_owner(head, HPAGE_PMD_NR);
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
/* Additional pin to swap cache */
if (PageSwapCache(head)) {
page_ref_add(head, 2);
xa_unlock(&swap_cache->i_pages);
} else {
page_ref_inc(head);
}
} else {
/* Additional pin to page cache */
page_ref_add(head, 2);
xa_unlock(&head->mapping->i_pages);
}
spin_unlock_irqrestore(&pgdat->lru_lock, flags);
remap_page(head);
for (i = 0; i < HPAGE_PMD_NR; i++) {
struct page *subpage = head + i;
if (subpage == page)
continue;
unlock_page(subpage);
/*
		 * Subpages may be freed if there wasn't any mapping, e.g.
		 * if add_to_swap() is running on an LRU page that had its
		 * mapping zapped. Freeing these pages requires taking the
		 * lru_lock, so we do the put_page of the tail pages after
		 * the split is complete.
*/
put_page(subpage);
}
}
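/*
 * Return the total number of mappings (PMD and PTE) of @page. For file
 * pages and PageDoubleMap() anon pages the compound_mapcount is already
 * reflected in the per-subpage _mapcount, so it must not be counted
 * twice.
 */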
int total_mapcount(struct page *page)
{
int i, compound, ret;
VM_BUG_ON_PAGE(PageTail(page), page);
if (likely(!PageCompound(page)))
return atomic_read(&page->_mapcount) + 1;
compound = compound_mapcount(page);
if (PageHuge(page))
return compound;
ret = compound;
for (i = 0; i < HPAGE_PMD_NR; i++)
ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
if (!PageAnon(page))
return ret - compound * HPAGE_PMD_NR;
if (PageDoubleMap(page))
ret -= HPAGE_PMD_NR;
return ret;
}
/*
* This calculates accurately how many mappings a transparent hugepage
* has (unlike page_mapcount() which isn't fully accurate). This full
* accuracy is primarily needed to know if copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying it. At the same time this returns the total_mapcount too.
*
* The function returns the highest mapcount any one of the subpages
* has. If the return value is one, even if different processes are
* mapping different subpages of the transparent hugepage, they can
* all reuse it, because each process is reusing a different subpage.
*
* The total_mapcount is instead counting all virtual mappings of the
* subpages. If the total_mapcount is equal to "one", it tells the
* caller all mappings belong to the same "mm" and in turn the
* anon_vma of the transparent hugepage can become the vma->anon_vma
* local one as no other process may be mapping any of the subpages.
*
* It would be more accurate to replace page_mapcount() with
* page_trans_huge_mapcount(), however we only use
* page_trans_huge_mapcount() in the copy-on-write faults where we
* need full accuracy to avoid breaking page pinning, because
* page_trans_huge_mapcount() is slower than page_mapcount().
*/
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
int i, ret, _total_mapcount, mapcount;
/* hugetlbfs shouldn't call it */
VM_BUG_ON_PAGE(PageHuge(page), page);
if (likely(!PageTransCompound(page))) {
mapcount = atomic_read(&page->_mapcount) + 1;
if (total_mapcount)
*total_mapcount = mapcount;
return mapcount;
}
page = compound_head(page);
_total_mapcount = ret = 0;
for (i = 0; i < HPAGE_PMD_NR; i++) {
mapcount = atomic_read(&page[i]._mapcount) + 1;
ret = max(ret, mapcount);
_total_mapcount += mapcount;
}
if (PageDoubleMap(page)) {
ret -= 1;
_total_mapcount -= HPAGE_PMD_NR;
}
mapcount = compound_mapcount(page);
ret += mapcount;
_total_mapcount += mapcount;
if (total_mapcount)
*total_mapcount = _total_mapcount;
return ret;
}
/*
 * Racy check whether the huge page can be split: the split can proceed
 * only if the caller's pin is the sole reference beyond the page
 * mappings and the page/swap cache pins counted in extra_pins (the
 * "- 1" below is the caller's pin).
 */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
int extra_pins;
	/* Additional pins from the page cache or swap cache */
if (PageAnon(page))
extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
else
extra_pins = HPAGE_PMD_NR;
if (pextra_pins)
*pextra_pins = extra_pins;
return total_mapcount(page) == page_count(page) - extra_pins - 1;
}
/*
 * This function splits a huge page into normal pages. @page can point to
 * any subpage of the huge page to split. The split doesn't change the
 * position of @page.
*
 * The caller's pin must be the only extra pin on @page, otherwise the
 * split fails with -EBUSY.
* The huge page must be locked.
*
* If @list is null, tail pages will be added to LRU list, otherwise, to @list.
*
* Both head page and tail pages will inherit mapping, flags, and so on from
* the hugepage.
*
 * The GUP pin and PG_locked are transferred to @page. The remaining
 * subpages can be freed if they are not mapped.
*
* Returns 0 if the hugepage is split successfully.
* Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
* us.
*/
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
struct page *head = compound_head(page);
struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
struct deferred_split *ds_queue = get_deferred_split_queue(page);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
int extra_pins, ret;
bool mlocked;
unsigned long flags;
pgoff_t end;
VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageCompound(page), page);
if (PageWriteback(page))
return -EBUSY;
if (PageAnon(head)) {
/*
* The caller does not necessarily hold an mmap_sem that would
		 * prevent the anon_vma from disappearing, so we first take a
* reference to it and then lock the anon_vma for write. This
* is similar to page_lock_anon_vma_read except the write lock
* is taken to serialise against parallel split or collapse
* operations.
*/
anon_vma = page_get_anon_vma(head);
if (!anon_vma) {
ret = -EBUSY;
goto out;
}
end = -1;
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
mapping = head->mapping;
		/* Truncated? */
if (!mapping) {
ret = -EBUSY;
goto out;
}
anon_vma = NULL;
i_mmap_lock_read(mapping);
/*
		 * __split_huge_page() may need to trim off pages beyond EOF:
* but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
* which cannot be nested inside the page tree lock. So note
* end now: i_size itself may be changed at any moment, but
* head page lock is good enough to serialize the trimming.
*/
end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
}
/*
* Racy check if we can split the page, before unmap_page() will
* split PMDs
*/
if (!can_split_huge_page(head, &extra_pins)) {
ret = -EBUSY;
goto out_unlock;
}
mlocked = PageMlocked(page);
unmap_page(head);
	/* Make sure the page is not on a per-CPU pagevec, as that takes a pin */
if (mlocked)
lru_add_drain();
	/* Prevent PageLRU from going away under us, and freeze lru stats */
spin_lock_irqsave(&pgdata->lru_lock, flags);
if (mapping) {
XA_STATE(xas, &mapping->i_pages, page_index(head));
/*
* Check if the head page is present in page cache.
		 * We assume all tail pages are present too, if the head is there.
*/
xa_lock(&mapping->i_pages);
if (xas_load(&xas) != head)
goto fail;
}
	/* Prevent deferred_split_scan() from touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
if (page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
}
if (mapping) {
if (PageSwapBacked(page))
__dec_node_page_state(page, NR_SHMEM_THPS);
else
__dec_node_page_state(page, NR_FILE_THPS);
}
spin_unlock(&ds_queue->split_queue_lock);
__split_huge_page(page, list, end, flags);
if (PageSwapCache(head)) {
swp_entry_t entry = { .val = page_private(head) };
ret = split_swap_cluster(entry);
} else
ret = 0;
} else {
spin_unlock(&ds_queue->split_queue_lock);
fail:
if (mapping)
xa_unlock(&mapping->i_pages);
spin_unlock_irqrestore(&pgdata->lru_lock, flags);
remap_page(head);
ret = -EBUSY;
}
out_unlock:
if (anon_vma) {
anon_vma_unlock_write(anon_vma);
put_anon_vma(anon_vma);
}
if (mapping)
i_mmap_unlock_read(mapping);
out:
count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
return ret;
}
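/*
 * Compound page destructor for THPs: take the page off the deferred
 * split queue, if needed, before freeing the compound page.
 */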
void free_transhuge_page(struct page *page)
{
struct deferred_split *ds_queue = get_deferred_split_queue(page);
unsigned long flags;
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (!list_empty(page_deferred_list(page))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(page));
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
free_compound_page(page);
}
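/*
 * Queue a partially unmapped THP on the deferred split queue so that
 * the shrinker can split it and free the unused subpages under memory
 * pressure.
 */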
void deferred_split_huge_page(struct page *page)
{
struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
#endif
unsigned long flags;
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
/*
	 * The try_to_unmap() in the page reclaim path might reach here too;
	 * racing with it could corrupt the deferred split queue. Also, if
	 * page reclaim is already handling the same page, there is no need
	 * to handle it again in the shrinker.
	 *
	 * Check PageSwapCache to determine whether the page is being handled
	 * by page reclaim, since THP swap would add the page into the swap
	 * cache before calling try_to_unmap().
*/
if (PageSwapCache(page))
return;
if (!list_empty(page_deferred_list(page)))
return;
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (list_empty(page_deferred_list(page))) {
count_vm_event(THP_DEFERRED_SPLIT_PAGE);
list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
if (memcg)
memcg_set_shrinker_bit(memcg, page_to_nid(page),
deferred_split_shrinker.id);
#endif
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
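/*
 * Shrinker ->count_objects callback: report the length of the per-node
 * (or, with CONFIG_MEMCG, per-memcg) deferred split queue.
 */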
static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
#ifdef CONFIG_MEMCG
if (sc->memcg)
ds_queue = &sc->memcg->deferred_split_queue;
#endif
return READ_ONCE(ds_queue->split_queue_len);
}
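/*
 * Shrinker ->scan_objects callback: pin up to nr_to_scan queued THPs,
 * drop the queue lock, try to split each of them, and requeue whatever
 * could not be split.
 */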
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
unsigned long flags;
LIST_HEAD(list), *pos, *next;
struct page *page;
int split = 0;
#ifdef CONFIG_MEMCG
if (sc->memcg)
ds_queue = &sc->memcg->deferred_split_queue;
#endif
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take a pin on all head pages to avoid them being freed under us */
list_for_each_safe(pos, next, &ds_queue->split_queue) {
page = list_entry((void *)pos, struct page, mapping);
page = compound_head(page);
if (get_page_unless_zero(page)) {
list_move(page_deferred_list(page), &list);
} else {
/* We lost race with put_compound_page() */
list_del_init(page_deferred_list(page));
ds_queue->split_queue_len--;
}
if (!--sc->nr_to_scan)
break;
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
list_for_each_safe(pos, next, &list) {
page = list_entry((void *)pos, struct page, mapping);
if (!trylock_page(page))
goto next;
/* split_huge_page() removes page from list on success */
if (!split_huge_page(page))
split++;
unlock_page(page);
next:
put_page(page);
}
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
list_splice_tail(&list, &ds_queue->split_queue);
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
/*
	 * Stop the shrinker if we didn't split any page and the queue is empty.
* This can happen if pages were freed under us.
*/
if (!split && list_empty(&ds_queue->split_queue))
return SHRINK_STOP;
return split;
}
static struct shrinker deferred_split_shrinker = {
.count_objects = deferred_split_count,
.scan_objects = deferred_split_scan,
.seeks = DEFAULT_SEEKS,
.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
SHRINKER_NONSLAB,
};
#ifdef CONFIG_DEBUG_FS
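/*
 * Debug/test knob: writing 1 to the debugfs file split_huge_pages walks
 * every populated zone and tries to split each THP it finds.
 */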
static int split_huge_pages_set(void *data, u64 val)
{
struct zone *zone;
struct page *page;
unsigned long pfn, max_zone_pfn;
unsigned long total = 0, split = 0;
if (val != 1)
return -EINVAL;
for_each_populated_zone(zone) {
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
if (!get_page_unless_zero(page))
continue;
if (zone != page_zone(page))
goto next;
if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
goto next;
total++;
lock_page(page);
if (!split_huge_page(page))
split++;
unlock_page(page);
next:
put_page(page);
}
}
pr_info("%lu of %lu THP split\n", split, total);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
"%llu\n");
static int __init split_huge_pages_debugfs(void)
{
debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
&split_huge_pages_fops);
return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
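/*
 * Replace a present huge PMD mapping of @page with a PMD migration
 * entry, preserving the dirty, write and soft-dirty bits, and drop the
 * mapping's rmap reference on the page.
 */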
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
struct vm_area_struct *vma = pvmw->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long address = pvmw->address;
pmd_t pmdval;
swp_entry_t entry;
pmd_t pmdswp;
if (!(pvmw->pmd && !pvmw->pte))
return;
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
if (pmd_dirty(pmdval))
set_page_dirty(page);
entry = make_migration_entry(page, pmd_write(pmdval));
pmdswp = swp_entry_to_pmd(entry);
if (pmd_soft_dirty(pmdval))
pmdswp = pmd_swp_mksoft_dirty(pmdswp);
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
page_remove_rmap(page, true);
put_page(page);
}
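/*
 * Restore a huge PMD mapping of @new from its PMD migration entry once
 * migration completes: rebuild the PMD, re-add the rmap and re-mlock
 * the page if the VMA is VM_LOCKED.
 */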
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
struct vm_area_struct *vma = pvmw->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long address = pvmw->address;
unsigned long mmun_start = address & HPAGE_PMD_MASK;
pmd_t pmde;
swp_entry_t entry;
if (!(pvmw->pmd && !pvmw->pte))
return;
entry = pmd_to_swp_entry(*pvmw->pmd);
get_page(new);
pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_write_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
if (PageAnon(new))
page_add_anon_rmap(new, vma, mmun_start, true);
else
page_add_file_rmap(new, true);
set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
mlock_vma_page(new);
update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif