android_kernel_xiaomi_sm8350/arch/powerpc/lib/memcmp_64.S

639 lines · 11 KiB · PowerPC64 assembly
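
The 639-line assembly body of memcmp_64.S is not reproduced in this capture; only the merge-commit history entry is shown below. For orientation, here is a minimal C sketch of the contract that this PowerPC64 routine implements — plain memcmp semantics. The name memcmp_ref is illustrative only; the kernel's actual file is a hand-optimized assembly implementation of the same behaviour, not this byte-wise loop.

#include <stddef.h>

/*
 * Reference semantics only: compare n bytes of s1 and s2 as unsigned
 * chars and return <0, 0 or >0 according to the first differing byte.
 * arch/powerpc/lib/memcmp_64.S provides an optimized assembly
 * implementation of this same contract.
 */
static int memcmp_ref(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;
	size_t i;

	for (i = 0; i < n; i++) {
		if (a[i] != b[i])
			return a[i] < b[i] ? -1 : 1;
	}
	return 0;
}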

Merge remote-tracking branch 'remotes/origin/tmp-f686d9f' into msm-lahaina * remotes/origin/tmp-f686d9f: ANDROID: update abi_gki_aarch64.xml for 5.2-rc6 Linux 5.2-rc6 Revert "iommu/vt-d: Fix lock inversion between iommu->lock and device_domain_lock" Bluetooth: Fix regression with minimum encryption key size alignment tcp: refine memory limit test in tcp_fragment() x86/vdso: Prevent segfaults due to hoisted vclock reads SUNRPC: Fix a credential refcount leak Revert "SUNRPC: Declare RPC timers as TIMER_DEFERRABLE" net :sunrpc :clnt :Fix xps refcount imbalance on the error path NFS4: Only set creation opendata if O_CREAT ANDROID: gki_defconfig: workaround to enable configs ANDROID: gki_defconfig: more configs for partners ARM: 8867/1: vdso: pass --be8 to linker if necessary KVM: nVMX: reorganize initial steps of vmx_set_nested_state KVM: PPC: Book3S HV: Invalidate ERAT when flushing guest TLB entries habanalabs: use u64_to_user_ptr() for reading user pointers nfsd: replace Jeff by Chuck as nfsd co-maintainer inet: clear num_timeout reqsk_alloc() PCI/P2PDMA: Ignore root complex whitelist when an IOMMU is present net: mvpp2: debugfs: Add pmap to fs dump ipv6: Default fib6_type to RTN_UNICAST when not set net: hns3: Fix inconsistent indenting net/af_iucv: always register net_device notifier net/af_iucv: build proper skbs for HiperTransport net/af_iucv: remove GFP_DMA restriction for HiperTransport doc: fix documentation about UIO_MEM_LOGICAL using MAINTAINERS / Documentation: Thorsten Scherer is the successor of Gavin Schenk docs: fb: Add TER16x32 to the available font names MAINTAINERS: fpga: hand off maintainership to Moritz treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 507 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 506 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 505 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 504 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 503 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 502 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 501 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 500 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 499 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 498 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 497 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 496 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 495 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 491 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 490 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 489 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 488 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 487 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 486 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 485 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 484 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 482 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 481 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 480 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 479 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 477 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 475 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 474 treewide: Replace GPLv2 
boilerplate/reference with SPDX - rule 473 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 472 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 471 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 469 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 468 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 467 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 466 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 465 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 464 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 463 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 462 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 461 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 460 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 459 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 457 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 456 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 455 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 454 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 452 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 451 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 250 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 248 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 247 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 246 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 245 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 244 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 243 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 239 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 238 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 237 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 235 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 234 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 233 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 232 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 231 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 230 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 226 KVM: arm/arm64: Fix emulated ptimer irq injection net: dsa: mv88e6xxx: fix shift of FID bits in mv88e6185_g1_vtu_loadpurge() tests: kvm: Check for a kernel warning kvm: tests: Sort tests in the Makefile alphabetically KVM: x86/mmu: Allocate PAE root array when using SVM's 32-bit NPT KVM: x86: Modify struct kvm_nested_state to have explicit fields for data fanotify: update connector fsid cache on add mark quota: fix a problem about transfer quota drm/i915: Don't clobber M/N values during fastset check powerpc: enable a 30-bit ZONE_DMA for 32-bit pmac ovl: make i_ino consistent with st_ino in more cases scsi: qla2xxx: Fix hardlockup in abort command during driver remove scsi: ufs: Avoid runtime suspend possibly being blocked forever scsi: qedi: update driver version to 8.37.0.20 scsi: qedi: Check targetname while finding boot target information hvsock: fix epollout hang from race condition net/udp_gso: Allow TX timestamp with UDP GSO net: netem: fix use after free and double free with packet corruption net: netem: fix backlog accounting for corrupted GSO frames net: lio_core: fix 
potential sign-extension overflow on large shift tipc: pass tunnel dev as NULL to udp_tunnel(6)_xmit_skb ip6_tunnel: allow not to count pkts on tstats by passing dev as NULL ip_tunnel: allow not to count pkts on tstats by setting skb's dev to NULL apparmor: reset pos on failure to unpack for various functions apparmor: enforce nullbyte at end of tag string apparmor: fix PROFILE_MEDIATES for untrusted input RDMA/efa: Handle mmap insertions overflow tun: wake up waitqueues after IFF_UP is set drm: return -EFAULT if copy_to_user() fails net: remove duplicate fetch in sock_getsockopt tipc: fix issues with early FAILOVER_MSG from peer bnx2x: Check if transceiver implements DDM before access xhci: detect USB 3.2 capable host controllers correctly usb: xhci: Don't try to recover an endpoint if port is in error state. KVM: fix typo in documentation drm/panfrost: Make sure a BO is only unmapped when appropriate md: fix for divide error in status_resync soc: ixp4xx: npe: Fix an IS_ERR() vs NULL check in probe arm64/mm: don't initialize pgd_cache twice MAINTAINERS: Update my email address arm64/sve: <uapi/asm/ptrace.h> should not depend on <uapi/linux/prctl.h> ovl: fix typo in MODULE_PARM_DESC ovl: fix bogus -Wmaybe-unitialized warning ovl: don't fail with disconnected lower NFS mmc: core: Prevent processing SDIO IRQs when the card is suspended mmc: sdhci: sdhci-pci-o2micro: Correctly set bus width when tuning brcmfmac: sdio: Don't tune while the card is off mmc: core: Add sdio_retune_hold_now() and sdio_retune_release() brcmfmac: sdio: Disable auto-tuning around commands expected to fail mmc: core: API to temporarily disable retuning for SDIO CRC errors Revert "brcmfmac: disable command decode in sdio_aos" ARM: ixp4xx: include irqs.h where needed ARM: ixp4xx: mark ixp4xx_irq_setup as __init ARM: ixp4xx: don't select SERIAL_OF_PLATFORM firmware: trusted_foundations: add ARMv7 dependency usb: dwc2: Use generic PHY width in params setup RDMA/efa: Fix success return value in case of error IB/hfi1: Handle port down properly in pio IB/hfi1: Handle wakeup of orphaned QPs for pio IB/hfi1: Wakeup QPs orphaned on wait list after flush IB/hfi1: Use aborts to trigger RC throttling IB/hfi1: Create inline to get extended headers IB/hfi1: Silence txreq allocation warnings IB/hfi1: Avoid hardlockup with flushlist_lock KVM: PPC: Book3S HV: Only write DAWR[X] when handling h_set_dawr in real mode KVM: PPC: Book3S HV: Fix r3 corruption in h_set_dabr() fs/namespace: fix unprivileged mount propagation vfs: fsmount: add missing mntget() cifs: fix GlobalMid_Lock bug in cifs_reconnect SMB3: retry on STATUS_INSUFFICIENT_RESOURCES instead of failing write staging: erofs: add requirements field in superblock arm64: ssbd: explicitly depend on <linux/prctl.h> block: fix page leak when merging to same page block: return from __bio_try_merge_page if merging occured in the same page Btrfs: fix failure to persist compression property xattr deletion on fsync riscv: remove unused barrier defines usb: chipidea: udc: workaround for endpoint conflict issue MAINTAINERS: Change QCOM repo location mmc: mediatek: fix SDIO IRQ detection issue mmc: mediatek: fix SDIO IRQ interrupt handle flow mmc: core: complete HS400 before checking status riscv: mm: synchronize MMU after pte change MAINTAINERS: Update my email address to use @kernel.org ANDROID: update abi_gki_aarch64.xml for 5.2-rc5 riscv: dts: add initial board data for the SiFive HiFive Unleashed riscv: dts: add initial support for the SiFive FU540-C000 SoC dt-bindings: riscv: convert 
cpu binding to json-schema dt-bindings: riscv: sifive: add YAML documentation for the SiFive FU540 arch: riscv: add support for building DTB files from DT source data drm/i915/gvt: ignore unexpected pvinfo write lapb: fixed leak of control-blocks. tipc: purge deferredq list for each grp member in tipc_group_delete ax25: fix inconsistent lock state in ax25_destroy_timer neigh: fix use-after-free read in pneigh_get_next tcp: fix compile error if !CONFIG_SYSCTL hv_sock: Suppress bogus "may be used uninitialized" warnings be2net: Fix number of Rx queues used for flow hashing net: handle 802.1P vlan 0 packets properly Linux 5.2-rc5 tcp: enforce tcp_min_snd_mss in tcp_mtu_probing() tcp: add tcp_min_snd_mss sysctl tcp: tcp_fragment() should apply sane memory limits tcp: limit payload size of sacked skbs Revert "net: phylink: set the autoneg state in phylink_phy_change" bpf: fix nested bpf tracepoints with per-cpu data bpf: Fix out of bounds memory access in bpf_sk_storage vsock/virtio: set SOCK_DONE on peer shutdown net: dsa: rtl8366: Fix up VLAN filtering net: phylink: set the autoneg state in phylink_phy_change powerpc/32: fix build failure on book3e with KVM powerpc/booke: fix fast syscall entry on SMP powerpc/32s: fix initial setup of segment registers on secondary CPU x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback net: add high_order_alloc_disable sysctl/static key tcp: add tcp_tx_skb_cache sysctl tcp: add tcp_rx_skb_cache sysctl sysctl: define proc_do_static_key() hv_netvsc: Set probe mode to sync net: sched: flower: don't call synchronize_rcu() on mask creation net: dsa: fix warning same module names sctp: Free cookie before we memdup a new one net: dsa: microchip: Don't try to read stats for unused ports qmi_wwan: extend permitted QMAP mux_id value range qmi_wwan: avoid RCU stalls on device disconnect when in QMAP mode qmi_wwan: add network device usage statistics for qmimux devices qmi_wwan: add support for QMAP padding in the RX path bpf, x64: fix stack layout of JITed bpf code Smack: Restore the smackfsdef mount option and add missing prefixes bpf, devmap: Add missing RCU read lock on flush bpf, devmap: Add missing bulk queue free bpf, devmap: Fix premature entry free on destroying map ftrace: Fix NULL pointer dereference in free_ftrace_func_mapper() module: Fix livepatch/ftrace module text permissions race tracing/uprobe: Fix obsolete comment on trace_uprobe_create() tracing/uprobe: Fix NULL pointer dereference in trace_uprobe_create() tracing: Make two symbols static tracing: avoid build warning with HAVE_NOP_MCOUNT tracing: Fix out-of-range read in trace_stack_print() gfs2: Fix rounding error in gfs2_iomap_page_prepare net: phylink: further mac_config documentation improvements nfc: Ensure presence of required attributes in the deactivate_target handler btrfs: start readahead also in seed devices x86/kasan: Fix boot with 5-level paging and KASAN cfg80211: report measurement start TSF correctly cfg80211: fix memory leak of wiphy device name cfg80211: util: fix bit count off by one mac80211: do not start any work during reconfigure flow cfg80211: use BIT_ULL in cfg80211_parse_mbssid_data() mac80211: only warn once on chanctx_conf being NULL mac80211: drop robust management frames from unknown TA gpu: ipu-v3: image-convert: Fix image downsize coefficients gpu: ipu-v3: image-convert: Fix input bytesperline for packed formats gpu: ipu-v3: image-convert: Fix input bytesperline width/height align thunderbolt: Implement CIO reset correctly for Titan Ridge ARM: 
davinci: da8xx: specify dma_coherent_mask for lcdc ARM: davinci: da850-evm: call regulator_has_full_constraints() timekeeping: Repair ktime_get_coarse*() granularity Revert "ALSA: hda/realtek - Improve the headset mic for Acer Aspire laptops" ANDROID: update abi_gki_aarch64.xml mm/devm_memremap_pages: fix final page put race PCI/P2PDMA: track pgmap references per resource, not globally lib/genalloc: introduce chunk owners PCI/P2PDMA: fix the gen_pool_add_virt() failure path mm/devm_memremap_pages: introduce devm_memunmap_pages drivers/base/devres: introduce devm_release_action() mm/vmscan.c: fix trying to reclaim unevictable LRU page coredump: fix race condition between collapse_huge_page() and core dumping mm/mlock.c: change count_mm_mlocked_page_nr return type mm: mmu_gather: remove __tlb_reset_range() for force flush fs/ocfs2: fix race in ocfs2_dentry_attach_lock() mm/vmscan.c: fix recent_rotated history mm/mlock.c: mlockall error for flag MCL_ONFAULT scripts/decode_stacktrace.sh: prefix addr2line with $CROSS_COMPILE mm/list_lru.c: fix memory leak in __memcg_init_list_lru_node mm: memcontrol: don't batch updates of local VM stats and events PCI: PM: Skip devices in D0 for suspend-to-idle ANDROID: Removed extraneous configs from gki powerpc/bpf: use unsigned division instruction for 64-bit operations bpf: fix div64 overflow tests to properly detect errors bpf: sync BPF_FIB_LOOKUP flag changes with BPF uapi bpf: simplify definition of BPF_FIB_LOOKUP related flags cifs: add spinlock for the openFileList to cifsInodeInfo cifs: fix panic in smb2_reconnect x86/fpu: Don't use current->mm to check for a kthread KVM: nVMX: use correct clean fields when copying from eVMCS vfio-ccw: Destroy kmem cache region on module exit block/ps3vram: Use %llu to format sector_t after LBDAF removal libata: Extend quirks for the ST1000LM024 drives with NOLPM quirk bcache: only set BCACHE_DEV_WB_RUNNING when cached device attached bcache: fix stack corruption by PRECEDING_KEY() arm64/sve: Fix missing SVE/FPSIMD endianness conversions blk-mq: remove WARN_ON(!q->elevator) from blk_mq_sched_free_requests blkio-controller.txt: Remove references to CFQ block/switching-sched.txt: Update to blk-mq schedulers null_blk: remove duplicate check for report zone blk-mq: no need to check return value of debugfs_create functions io_uring: fix memory leak of UNIX domain socket inode block: force select mq-deadline for zoned block devices binder: fix possible UAF when freeing buffer drm/amdgpu: return 0 by default in amdgpu_pm_load_smu_firmware drm/amdgpu: Fix bounds checking in amdgpu_ras_is_supported() ANDROID: x86 gki_defconfig: enable DMA_CMA ANDROID: Fixed x86 regression ANDROID: gki_defconfig: enable DMA_CMA Input: synaptics - enable SMBus on ThinkPad E480 and E580 net: mvpp2: prs: Use the correct helpers when removing all VID filters net: mvpp2: prs: Fix parser range for VID filtering mlxsw: spectrum: Disallow prio-tagged packets when PVID is removed mlxsw: spectrum_buffers: Reduce pool size on Spectrum-2 selftests: tc_flower: Add TOS matching test mlxsw: spectrum_flower: Fix TOS matching selftests: mlxsw: Test nexthop offload indication mlxsw: spectrum_router: Refresh nexthop neighbour when it becomes dead mlxsw: spectrum: Use different seeds for ECMP and LAG hash net: tls, correctly account for copied bytes with multiple sk_msgs vrf: Increment Icmp6InMsgs on the original netdev cpuset: restore sanity to cpuset_cpus_allowed_fallback() net: ethtool: Allow matching on vlan DEI bit linux-next: DOC: RDS: Fix a typo in rds.txt 
x86/kgdb: Return 0 from kgdb_arch_set_breakpoint() mpls: fix af_mpls dependencies for real selinux: fix a missing-check bug in selinux_sb_eat_lsm_opts() selinux: fix a missing-check bug in selinux_add_mnt_opt( ) arm64: tlbflush: Ensure start/end of address range are aligned to stride usb: typec: Make sure an alt mode exist before getting its partner KVM: arm/arm64: vgic: Fix kvm_device leak in vgic_its_destroy KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST KVM: arm64: Implement vq_present() as a macro xdp: check device pointer before clearing bpf: net: Set sk_bpf_storage back to NULL for cloned sk Btrfs: fix race between block group removal and block group allocation clocksource/drivers/arm_arch_timer: Don't trace count reader functions i2c: pca-platform: Fix GPIO lookup code thunderbolt: Make sure device runtime resume completes before taking domain lock drm: add fallback override/firmware EDID modes workaround i2c: acorn: fix i2c warning arm64: Don't unconditionally add -Wno-psabi to KBUILD_CFLAGS drm/edid: abstract override/firmware EDID retrieval platform/mellanox: mlxreg-hotplug: Add devm_free_irq call to remove flow platform/x86: mlx-platform: Fix parent device in i2c-mux-reg device registration platform/x86: intel-vbtn: Report switch events when event wakes device platform/x86: asus-wmi: Only Tell EC the OS will handle display hotkeys from asus_nb_wmi ARM: mvebu_v7_defconfig: fix Ethernet on Clearfog x86/resctrl: Prevent NULL pointer dereference when local MBM is disabled x86/resctrl: Don't stop walking closids when a locksetup group is found iommu/arm-smmu: Avoid constant zero in TLBI writes drm/i915/perf: fix whitelist on Gen10+ drm/i915/sdvo: Implement proper HDMI audio support for SDVO drm/i915: Fix per-pixel alpha with CCS drm/i915/dmc: protect against reading random memory drm/i915/dsi: Use a fuzzy check for burst mode clock check Input: imx_keypad - make sure keyboard can always wake up system selinux: log raw contexts as untrusted strings ptrace: restore smp_rmb() in __ptrace_may_access() IB/hfi1: Correct tid qp rcd to match verbs context IB/hfi1: Close PSM sdma_progress sleep window IB/hfi1: Validate fault injection opcode user input geneve: Don't assume linear buffers in error handler vxlan: Don't assume linear buffers in error handler net: openvswitch: do not free vport if register_netdevice() is failed. net: correct udp zerocopy refcnt also when zerocopy only on append drm/amdgpu/{uvd,vcn}: fetch ring's read_ptr after alloc ovl: fix wrong flags check in FS_IOC_FS[SG]ETXATTR ioctls riscv: Fix udelay in RV32. 
drm/vmwgfx: fix a warning due to missing dma_parms riscv: export pm_power_off again drm/vmwgfx: Honor the sg list segment size limitation RISC-V: defconfig: enable clocks, serial console drm/vmwgfx: Use the backdoor port if the HB port is not available bpf: lpm_trie: check left child of last leftmost node for NULL Revert "fuse: require /dev/fuse reads to have enough buffer capacity" ALSA: ice1712: Check correct return value to snd_i2c_sendbytes (EWS/DMX 6Fire) ALSA: oxfw: allow PCM capture for Stanton SCS.1m ALSA: firewire-motu: fix destruction of data for isochronous resources s390/ctl_reg: mark __ctl_set_bit and __ctl_clear_bit as __always_inline s390/boot: disable address-of-packed-member warning ANDROID: update gki aarch64 ABI representation cgroup: Fix css_task_iter_advance_css_set() cset skip condition drm/panfrost: Require the simple_ondemand governor drm/panfrost: make devfreq optional again drm/gem_shmem: Use a writecombine mapping for ->vaddr mmc: sdhi: disallow HS400 for M3-W ES1.2, RZ/G2M, and V3H ASoC: Intel: sst: fix kmalloc call with wrong flags ASoC: core: Fix deadlock in snd_soc_instantiate_card() cgroup/bfq: revert bfq.weight symlink change ARM: dts: am335x phytec boards: Fix cd-gpios active level ARM: dts: dra72x: Disable usb4_tm target module nfp: ensure skb network header is set for packet redirect tcp: fix undo spurious SYNACK in passive Fast Open mpls: fix af_mpls dependencies ibmvnic: Fix unchecked return codes of memory allocations ibmvnic: Refresh device multicast list after reset ibmvnic: Do not close unopened driver during reset mpls: fix warning with multi-label encap net: phy: rename Asix Electronics PHY driver ipv6: flowlabel: fl6_sock_lookup() must use atomic_inc_not_zero net: ipv4: fib_semantics: fix uninitialized variable Input: iqs5xx - get axis info before calling input_mt_init_slots() Linux 5.2-rc4 drm: panel-orientation-quirks: Add quirk for GPD MicroPC drm: panel-orientation-quirks: Add quirk for GPD pocket2 counter/ftm-quaddec: Add missing dependencies in Kconfig staging: iio: adt7316: Fix build errors when GPIOLIB is not set x86/fpu: Update kernel's FPU state before using for the fsave header MAINTAINERS: Karthikeyan Ramasubramanian is MIA i2c: xiic: Add max_read_len quirk ANDROID: update ABI representation gpio: pca953x: hack to fix 24 bit gpio expanders net/mlx5e: Support tagged tunnel over bond net/mlx5e: Avoid detaching non-existing netdev under switchdev mode net/mlx5e: Fix source port matching in fdb peer flow rule net/mlx5e: Replace reciprocal_scale in TX select queue function net/mlx5e: Add ndo_set_feature for uplink representor net/mlx5: Avoid reloading already removed devices net/mlx5: Update pci error handler entries and command translation RAS/CEC: Convert the timer callback to a workqueue RAS/CEC: Fix binary search function x86/mm/KASLR: Compute the size of the vmemmap section properly can: purge socket error queue on sock destruct can: flexcan: Remove unneeded registration message can: af_can: Fix error path of can_init() can: m_can: implement errata "Needless activation of MRAF irq" can: mcp251x: add support for mcp25625 dt-bindings: can: mcp251x: add mcp25625 support can: xilinx_can: use correct bittiming_const for CAN FD core can: flexcan: fix timeout when set small bitrate can: usb: Kconfig: Remove duplicate menu entry lockref: Limit number of cmpxchg loop retries uaccess: add noop untagged_addr definition x86/insn-eval: Fix use-after-free access to LDT entry kbuild: use more portable 'command -v' for cc-cross-prefix s390/unwind: 
correct stack switching during unwind scsi: hpsa: correct ioaccel2 chaining btrfs: Always trim all unallocated space in btrfs_trim_free_extents netfilter: ipv6: nf_defrag: accept duplicate fragments again powerpc/32s: fix booting with CONFIG_PPC_EARLY_DEBUG_BOOTX drm/meson: fix G12A primary plane disabling drm/meson: fix primary plane disabling drm/meson: fix G12A HDMI PLL settings for 4K60 1000/1001 variations block, bfq: add weight symlink to the bfq.weight cgroup parameter cgroup: let a symlink too be created with a cftype file powerpc/64s: __find_linux_pte() synchronization vs pmdp_invalidate() powerpc/64s: Fix THP PMD collapse serialisation powerpc: Fix kexec failure on book3s/32 drm/nouveau/secboot/gp10[2467]: support newer FW to fix SEC2 failures on some boards drm/nouveau/secboot: enable loading of versioned LS PMU/SEC2 ACR msgqueue FW drm/nouveau/secboot: split out FW version-specific LS function pointers drm/nouveau/secboot: pass max supported FW version to LS load funcs drm/nouveau/core: support versioned firmware loading drm/nouveau/core: pass subdev into nvkm_firmware_get, rather than device block: free sched's request pool in blk_cleanup_queue bpf: expand section tests for test_section_names bpf: more msg_name rewrite tests to test_sock_addr bpf, bpftool: enable recvmsg attach types bpf, libbpf: enable recvmsg attach types bpf: sync tooling uapi header bpf: fix unconnected udp hooks vfio/mdev: Synchronize device create/remove with parent removal vfio/mdev: Avoid creating sysfs remove file on stale device removal pktgen: do not sleep with the thread lock held. net: mvpp2: Use strscpy to handle stat strings net: rds: fix memory leak in rds_ib_flush_mr_pool ipv6: fix EFAULT on sendto with icmpv6 and hdrincl ipv6: use READ_ONCE() for inet->hdrincl as in ipv4 soundwire: intel: set dai min and max channels correctly soundwire: stream: fix bad unlock balance x86/fpu: Use fault_in_pages_writeable() for pre-faulting nvme-rdma: use dynamic dma mapping per command nvme: Fix u32 overflow in the number of namespace list calculation vfio/mdev: Improve the create/remove sequence SoC: rt274: Fix internal jack assignment in set_jack callback ALSA: hdac: fix memory release for SST and SOF drivers ASoC: SOF: Intel: hda: use the defined ppcap functions ASoC: core: move DAI pre-links initiation to snd_soc_instantiate_card ASoC: Intel: cht_bsw_rt5672: fix kernel oops with platform_name override ASoC: Intel: cht_bsw_nau8824: fix kernel oops with platform_name override ASoC: Intel: bytcht_es8316: fix kernel oops with platform_name override ASoC: Intel: cht_bsw_max98090: fix kernel oops with platform_name override Revert "gfs2: Replace gl_revokes with a GLF flag" arm64: Silence gcc warnings about arch ABI drift parisc: Fix crash due alternative coding for NP iopdir_fdc bit parisc: Use lpa instruction to load physical addresses in driver code parisc: configs: Remove useless UEVENT_HELPER_PATH parisc: Use implicit space register selection for loading the coherence index of I/O pdirs usb: gadget: udc: lpc32xx: fix return value check in lpc32xx_udc_probe() usb: gadget: dwc2: fix zlp handling usb: dwc2: Set actual frame number for completed ISOC transfer for none DDMA usb: gadget: udc: lpc32xx: allocate descriptor with GFP_ATOMIC usb: gadget: fusb300_udc: Fix memory leak of fusb300->ep[i] usb: phy: mxs: Disable external charger detect in mxs_phy_hw_init() usb: dwc2: Fix DMA cache alignment issues usb: dwc2: host: Fix wMaxPacketSize handling (fix webcam regression) ARM64: trivial: s/TIF_SECOMP/TIF_SECCOMP/ 
comment typo fix drm/komeda: Potential error pointer dereference drm/komeda: remove set but not used variable 'kcrtc' x86/CPU: Add more Icelake model numbers hwmon: (pmbus/core) Treat parameters as paged if on multiple pages hwmon: (pmbus/core) mutex_lock write in pmbus_set_samples hwmon: (core) add thermal sensors only if dev->of_node is present Revert "fib_rules: return 0 directly if an exactly same rule exists when NLM_F_EXCL not supplied" net: aquantia: fix wol configuration not applied sometimes ethtool: fix potential userspace buffer overflow Fix memory leak in sctp_process_init net: rds: fix memory leak when unload rds_rdma ipv6: fix the check before getting the cookie in rt6_get_cookie ipv4: not do cache for local delivery if bc_forwarding is enabled selftests: vm: Fix test build failure when built by itself tools: bpftool: Fix JSON output when lookup fails mmc: also set max_segment_size in the device mtip32xx: also set max_segment_size in the device rsxx: don't call dma_set_max_seg_size nvme-pci: don't limit DMA segement size s390/qeth: handle error when updating TX queue count s390/qeth: fix VLAN attribute in bridge_hostnotify udev event s390/qeth: check dst entry before use s390/qeth: handle limited IPv4 broadcast in L3 TX path ceph: fix error handling in ceph_get_caps() ceph: avoid iput_final() while holding mutex or in dispatch thread ceph: single workqueue for inode related works cgroup: css_task_iter_skip()'d iterators must be advanced before accessed drm/amd/amdgpu: add RLC firmware to support raven1 refresh drm/amd/powerplay: add set_power_profile_mode for raven1_refresh drm/amdgpu: fix ring test failure issue during s3 in vce 3.0 (V2) treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 450 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 449 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 448 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 446 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 445 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 444 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 443 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 442 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 441 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 440 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 438 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 437 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 436 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 435 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 434 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 433 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 432 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 431 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 430 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 429 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 428 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 426 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 424 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 423 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 422 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 421 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 420 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 
419 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 418 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 417 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 416 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 414 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 412 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 411 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 410 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 409 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 408 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 407 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 406 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 405 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 404 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 403 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 402 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 401 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 400 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 399 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 398 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 397 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 396 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 395 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 394 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 393 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 392 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 391 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 390 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 389 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 388 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 387 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 380 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 378 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 377 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 376 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 375 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 373 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 372 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 371 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 370 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 367 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 365 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 364 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 363 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 362 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 354 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 353 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 352 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 351 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 350 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 349 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 348 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 347 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 346 
treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 345 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 344 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 343 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 342 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 341 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 340 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 339 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 338 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 336 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 335 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 334 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 333 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 332 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 330 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 328 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 326 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 325 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 324 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 323 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 322 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 321 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 320 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 316 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 315 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 314 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 313 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 312 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 311 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 310 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 309 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 308 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 307 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 305 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 301 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 300 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 299 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 297 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 296 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 295 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 294 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 292 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 291 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 290 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 289 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 288 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 287 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 286 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 285 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 284 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 283 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 282 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 281 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 280 
treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 278 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 277 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 276 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 275 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 274 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 273 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 272 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 271 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 270 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 269 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 268 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 267 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 266 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 265 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 264 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 263 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 262 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 260 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 258 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 257 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 256 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 254 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 253 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 252 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 251 lib/test_stackinit: Handle Clang auto-initialization pattern block: Drop unlikely before IS_ERR(_OR_NULL) xen/swiotlb: don't initialize swiotlb twice on arm64 s390/mm: fix address space detection in exception handling HID: logitech-dj: Fix 064d:c52f receiver support Revert "HID: core: Call request_module before doing device_add" Revert "HID: core: Do not call request_module() in async context" Revert "HID: Increase maximum report size allowed by hid_field_extract()" tests: fix pidfd-test compilation signal: improve comments samples: fix pidfd-metadata compilation arm64: arch_timer: mark functions as __always_inline arm64: smp: Moved cpu_logical_map[] to smp.h arm64: cpufeature: Fix missing ZFR0 in __read_sysreg_by_encoding() selftests/bpf: move test_lirc_mode2_user to TEST_GEN_PROGS_EXTENDED USB: Fix chipmunk-like voice when using Logitech C270 for recording audio. USB: usb-storage: Add new ID to ums-realtek udmabuf: actually unmap the scatterlist net: fix indirect calls helpers for ptype list hooks. 
net: ipvlan: Fix ipvlan device tso disabled while NETIF_F_IP_CSUM is set scsi: smartpqi: unlock on error in pqi_submit_raid_request_synchronous() scsi: ufs: Check that space was properly alloced in copy_query_response udp: only choose unbound UDP socket for multicast when not in a VRF net/tls: replace the sleeping lock around RX resync with a bit lock Revert "net/tls: avoid NULL-deref on resync during device removal" block: aoe: no need to check return value of debugfs_create functions net: dsa: sja1105: Fix link speed not working at 100 Mbps and below net: phylink: avoid reducing support mask scripts/checkstack.pl: Fix arm64 wrong or unknown architecture kbuild: tar-pkg: enable communication with jobserver kconfig: tests: fix recursive inclusion unit test kbuild: teach kselftest-merge to find nested config files nvmet: fix data_len to 0 for bdev-backed write_zeroes MAINTAINERS: Hand over skd maintainership ASoC: sun4i-i2s: Add offset to RX channel select ASoC: sun4i-i2s: Fix sun8i tx channel offset mask ASoC: max98090: remove 24-bit format support if RJ is 0 ASoC: da7219: Fix build error without CONFIG_I2C ASoC: SOF: Intel: hda: Fix COMPILE_TEST build error drm/arm/hdlcd: Allow a bit of clock tolerance drm/arm/hdlcd: Actually validate CRTC modes drm/arm/mali-dp: Add a loop around the second set CVAL and try 5 times drm/komeda: fixing of DMA mapping sg segment warning netfilter: ipv6: nf_defrag: fix leakage of unqueued fragments habanalabs: Read upper bits of trace buffer from RWPHI arm64: arch_k3: Fix kconfig dependency warning drm: don't block fb changes for async plane updates drm/vc4: fix fb references in async update drm/msm: fix fb references in async update drm/amd: fix fb references in async update drm/rockchip: fix fb references in async update xen-blkfront: switch kcalloc to kvcalloc for large array allocation drm/mediatek: call mtk_dsi_stop() after mtk_drm_crtc_atomic_disable() drm/mediatek: clear num_pipes when unbind driver drm/mediatek: call drm_atomic_helper_shutdown() when unbinding driver drm/mediatek: unbind components in mtk_drm_unbind() drm/mediatek: fix unbind functions net: sfp: read eeprom in maximum 16 byte increments selftests: set sysctl bc_forwarding properly in router_broadcast.sh ANDROID: update gki aarch64 ABI representation net: ethernet: mediatek: Use NET_IP_ALIGN to judge if HW RX_2BYTE_OFFSET is enabled net: ethernet: mediatek: Use hw_feature to judge if HWLRO is supported net: ethernet: ti: cpsw_ethtool: fix ethtool ring param set ANDROID: gki_defconfig: Enable CMA, SLAB_FREELIST (RANDOM and HARDENED) on x86 bpf: udp: Avoid calling reuseport's bpf_prog from udp_gro bpf: udp: ipv6: Avoid running reuseport's bpf_prog from __udp6_lib_err rcu: locking and unlocking need to always be at least barriers ANDROID: gki_defconfig: enable SLAB_FREELIST_RANDOM, SLAB_FREELIST_HARDENED ANDROID: gki_defconfig: enable CMA and increase CMA_AREAS ASoC: SOF: fix DSP oops definitions in FW ABI ASoC: hda: fix unbalanced codec dev refcount for HDA_DEV_ASOC ASoC: SOF: ipc: replace fw ready bitfield with explicit bit ordering ASoC: SOF: bump to ABI 3.6 ASoC: SOF: soundwire: add initial soundwire support ASoC: SOF: uapi: mirror firmware changes ASoC: Intel: Baytrail: add quirk for Aegex 10 (RU2) tablet xfs: inode btree scrubber should calculate im_boffset correctly mmc: sdhci_am654: Fix SLOTTYPE write usb: typec: ucsi: ccg: fix memory leak in do_flash ANDROID: update gki aarch64 ABI representation habanalabs: Fix virtual address access via debugfs for 2MB pages drm/komeda: 
Constify the usage of komeda_component/pipeline/dev_funcs x86/power: Fix 'nosmt' vs hibernation triple fault during resume mm/vmalloc: Avoid rare case of flushing TLB with weird arguments mm/vmalloc: Fix calculation of direct map addr range PM: sleep: Add kerneldoc comments to some functions drm/i915/gvt: save RING_HEAD into vreg when vgpu switched out sparc: perf: fix updated event period in response to PERF_EVENT_IOC_PERIOD mdesc: fix a missing-check bug in get_vdev_port_node_info() drm/i915/gvt: add F_CMD_ACCESS flag for wa regs sparc64: Fix regression in non-hypervisor TLB flush xcall packet: unconditionally free po->rollover Update my email address net: hns: Fix loopback test failed at copper ports Linux 5.2-rc3 net: dsa: mv88e6xxx: avoid error message on remove from VLAN 0 mm, compaction: make sure we isolate a valid PFN include/linux/generic-radix-tree.h: fix kerneldoc comment kernel/signal.c: trace_signal_deliver when signal_group_exit drivers/iommu/intel-iommu.c: fix variable 'iommu' set but not used spdxcheck.py: fix directory structures kasan: initialize tag to 0xff in __kasan_kmalloc z3fold: fix sheduling while atomic scripts/gdb: fix invocation when CONFIG_COMMON_CLK is not set mm/gup: continue VM_FAULT_RETRY processing even for pre-faults ocfs2: fix error path kobject memory leak memcg: make it work on sparse non-0-node systems mm, memcg: consider subtrees in memory.events prctl_set_mm: downgrade mmap_sem to read lock prctl_set_mm: refactor checks from validate_prctl_map kernel/fork.c: make max_threads symbol static arch/arm/boot/compressed/decompress.c: fix build error due to lz4 changes arch/parisc/configs/c8000_defconfig: remove obsoleted CONFIG_DEBUG_SLAB_LEAK mm/vmalloc.c: fix typo in comment lib/sort.c: fix kernel-doc notation warnings mm: fix Documentation/vm/hmm.rst Sphinx warnings treewide: fix typos of SPDX-License-Identifier crypto: ux500 - fix license comment syntax error MAINTAINERS: add I2C DT bindings to ARM platforms MAINTAINERS: add DT bindings to i2c drivers mwifiex: Fix heap overflow in mwifiex_uap_parse_tail_ies() iwlwifi: mvm: change TLC config cmd sent by rs to be async iwlwifi: Fix double-free problems in iwl_req_fw_callback() iwlwifi: fix AX201 killer sku loading firmware issue iwlwifi: print fseq info upon fw assert iwlwifi: clear persistence bit according to device family iwlwifi: fix load in rfkill flow for unified firmware iwlwifi: mvm: remove d3_sram debugfs file bpf, riscv: clear high 32 bits for ALU32 add/sub/neg/lsh/rsh/arsh libbpf: Return btf_fd for load_sk_storage_btf HID: a4tech: fix horizontal scrolling HID: hyperv: Add a module description line net: dsa: sja1105: Don't store frame type in skb->cb block: print offending values when cloned rq limits are exceeded blk-mq: Document the blk_mq_hw_queue_to_node() arguments blk-mq: Fix spelling in a source code comment block: Fix bsg_setup_queue() kernel-doc header block: Fix rq_qos_wait() kernel-doc header block: Fix blk_mq_*_map_queues() kernel-doc headers block: Fix throtl_pending_timer_fn() kernel-doc header block: Convert blk_invalidate_devt() header into a non-kernel-doc header block/partitions/ldm: Convert a kernel-doc header into a non-kernel-doc header leds: avoid flush_work in atomic context cgroup: Include dying leaders with live threads in PROCS iterations cgroup: Implement css_task_iter_skip() cgroup: Call cgroup_release() before __exit_signal() netfilter: nf_tables: fix module autoload with inet family Revert "lockd: Show pid of lockd for remote locks" ALSA: hda/realtek - Update 
headset mode for ALC256 fs/adfs: fix filename fixup handling for "/" and "//" names fs/adfs: move append_filetype_suffix() into adfs_object_fixup() fs/adfs: remove truncated filename hashing fs/adfs: factor out filename fixup fs/adfs: factor out object fixups fs/adfs: factor out filename case lowering fs/adfs: factor out filename comparison ovl: doc: add non-standard corner cases pstore/ram: Run without kernel crash dump region MAINTAINERS: add Vasily Gorbik and Christian Borntraeger for s390 MAINTAINERS: Farewell Martin Schwidefsky pstore: Set tfm to NULL on free_buf_for_compression nds32: add new emulations for floating point instruction nds32: Avoid IEX status being incorrectly modified math-emu: Use statement expressions to fix Wshift-count-overflow warning net: correct zerocopy refcnt with udp MSG_MORE ethtool: Check for vlan etype or vlan tci when parsing flow_rule net: don't clear sock->sk early to avoid trouble in strparser net-gro: fix use-after-free read in napi_gro_frags() net: dsa: tag_8021q: Create a stable binary format net: dsa: tag_8021q: Change order of rx_vid setup net: mvpp2: fix bad MVPP2_TXQ_SCHED_TOKEN_CNTR_REG queue value docs cgroups: add another example size for hugetlb NFSv4.1: Fix bug only first CB_NOTIFY_LOCK is handled NFSv4.1: Again fix a race where CB_NOTIFY_LOCK fails to wake a waiter ipv4: tcp_input: fix stack out of bounds when parsing TCP options. mlxsw: spectrum: Prevent force of 56G mlxsw: spectrum_acl: Avoid warning after identical rules insertion SUNRPC: Fix a use after free when a server rejects the RPCSEC_GSS credential net: dsa: mv88e6xxx: fix handling of upper half of STATS_TYPE_PORT SUNRPC fix regression in umount of a secure mount r8169: fix MAC address being lost in PCI D3 treewide: Add SPDX license identifier - Kbuild treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 225 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 224 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 223 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 222 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 221 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 220 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 218 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 217 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 216 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 215 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 214 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 213 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 211 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 210 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 209 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 207 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 206 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 203 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 201 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 200 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 199 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 198 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 197 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 195 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 194 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 193 treewide: Replace 
GPLv2 boilerplate/reference with SPDX - rule 191 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 190 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 188 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 185 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 183 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 182 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 180 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 179 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 178 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 177 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 176 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 175 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 174 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 173 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 172 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 171 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 170 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 167 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 166 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 165 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 164 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 162 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 161 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 160 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 159 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 158 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 157 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 156 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 155 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 154 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 153 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 152 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 151 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 150 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 149 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 148 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 147 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 145 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 144 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 143 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 142 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 140 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 139 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 138 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 137 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 136 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 135 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 133 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 132 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 131 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 130 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 129 treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 128 treewide: Replace 
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Author: Anton Blanchard <anton@au.ibm.com>
* Copyright 2015 IBM Corporation.
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/ppc-opcode.h>
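/*
 * Register aliases: off8/off16/off24 are index registers used with the
 * indexed load forms below, and rA-rH are scratch registers that hold
 * the data being compared.
 */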
#define off8 r6
#define off16 r7
#define off24 r8
#define rA r9
#define rB r10
#define rC r11
#define rD r27
#define rE r28
#define rF r29
#define rG r30
#define rH r31
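/*
 * On little-endian, byte-reversing loads (lhbrx/lwbrx/ldbrx) are used so
 * that the loaded values are in memory byte order and an unsigned compare
 * orders them the way memcmp() requires; lvsr/lvsl together with the
 * swapped vperm operands do the equivalent for the unaligned vector loads.
 */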
#ifdef __LITTLE_ENDIAN__
#define LH lhbrx
#define LW lwbrx
#define LD ldbrx
#define LVS lvsr
#define VPERM(_VRT,_VRA,_VRB,_VRC) \
vperm _VRT,_VRB,_VRA,_VRC
#else
#define LH lhzx
#define LW lwzx
#define LD ldx
#define LVS lvsl
#define VPERM(_VRT,_VRA,_VRB,_VRC) \
vperm _VRT,_VRA,_VRB,_VRC
#endif
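/*
 * The vector (VMX) path is only taken for compares of at least VMX_THRESH
 * bytes. ENTER_VMX_OPS/EXIT_VMX_OPS call enter_vmx_ops/exit_vmx_ops with
 * r3-r5 and the link register preserved across the call; cr1 records
 * whether enter_vmx_ops allowed use of the vector unit.
 */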
#define VMX_THRESH 4096
#define ENTER_VMX_OPS \
mflr r0; \
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
bl enter_vmx_ops; \
cmpwi cr1,r3,0; \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
ld r4,STK_REG(R30)(r1); \
ld r5,STK_REG(R29)(r1); \
addi r1,r1,STACKFRAMESIZE; \
mtlr r0
#define EXIT_VMX_OPS \
mflr r0; \
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
bl exit_vmx_ops; \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
ld r4,STK_REG(R30)(r1); \
ld r5,STK_REG(R29)(r1); \
addi r1,r1,STACKFRAMESIZE; \
mtlr r0
/*
* LD_VSR_CROSS16B loads the 2nd 16 bytes for _vaddr, which is not aligned to
* a 16-byte boundary, and permutes the result with the 1st 16 bytes.
* | y y y y y y y y y y y y y 0 1 2 | 3 4 5 6 7 8 9 a b c d e f z z z |
* ^ ^ ^
* 0xbbbb10 0xbbbb20 0xbbbb30
* ^
* _vaddr
*
*
* _vmask is the mask generated by LVS
* _v1st_qw is the 1st aligned QW of current addr which is already loaded.
* for example: 0xyyyyyyyyyyyyy012 for big endian
* _v2nd_qw is the 2nd aligned QW of cur _vaddr to be loaded.
* for example: 0x3456789abcdefzzz for big endian
* The permute result is saved in _v_res.
* for example: 0x0123456789abcdef for big endian.
*/
#define LD_VSR_CROSS16B(_vaddr,_vmask,_v1st_qw,_v2nd_qw,_v_res) \
lvx _v2nd_qw,_vaddr,off16; \
VPERM(_v_res,_v1st_qw,_v2nd_qw,_vmask)
/*
* There are 2 categories for memcmp:
* 1) src/dst have the same offset from the 8-byte boundary. The handlers
* are named like .Lsameoffset_xxxx
* 2) src/dst have different offsets from the 8-byte boundary. The handlers
* are named like .Ldiffoffset_xxxx
*/
_GLOBAL_TOC(memcmp)
cmpdi cr1,r5,0
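/* cr1 records whether the length is zero; tested at the beq below. */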
/* Use the short loop if the src/dst addresses do not
* share the same offset from an 8-byte alignment boundary.
*/
xor r6,r3,r4
andi. r6,r6,7
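/* r6 = (r3 ^ r4) & 7; cr0 (EQ iff same offset) is tested at .Lno_short below. */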
/* Fall back to the short loop if comparing fewer than
* 8 bytes at aligned addresses.
*/
cmpdi cr6,r5,7
beq cr1,.Lzero
bgt cr6,.Lno_short
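/*
 * Byte-by-byte compare: CTR holds the byte count; the loop below compares
 * four bytes per iteration and exits early on the first mismatch.
 */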
.Lshort:
mtctr r5
1: lbz rA,0(r3)
lbz rB,0(r4)
subf. rC,rB,rA
bne .Lnon_zero
bdz .Lzero
lbz rA,1(r3)
lbz rB,1(r4)
subf. rC,rB,rA
bne .Lnon_zero
bdz .Lzero
lbz rA,2(r3)
lbz rB,2(r4)
subf. rC,rB,rA
bne .Lnon_zero
bdz .Lzero
lbz rA,3(r3)
lbz rB,3(r4)
subf. rC,rB,rA
bne .Lnon_zero
addi r3,r3,4
addi r4,r4,4
bdnz 1b
.Lzero:
li r3,0
blr
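/*
 * More than 7 bytes to compare: prefetch both buffers, then take the
 * diffoffset path if src/dst do not share the same offset within a
 * doubleword (cr0 was set by the andi. above).
 */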
.Lno_short:
dcbt 0,r3
dcbt 0,r4
bne .Ldiffoffset_8bytes_make_align_start
.Lsameoffset_8bytes_make_align_start:
/* Attempt to compare the bytes that are not 8-byte aligned first, so that
* the rest of the comparison can run on 8-byte-aligned addresses.
*/
andi. r6,r3,7
/* Try to compare the first double word, which is not 8-byte aligned:
* load the first double word at (src & ~7UL) and shift left by the
* appropriate number of bits before the comparison.
*/
rlwinm r6,r3,3,26,28
beq .Lsameoffset_8bytes_aligned
clrrdi r3,r3,3
clrrdi r4,r4,3
LD rA,0,r3
LD rB,0,r4
sld rA,rA,r6
sld rB,rB,r6
cmpld cr0,rA,rB
srwi r6,r6,3
bne cr0,.LcmpAB_lightweight
subfic r6,r6,8
subf. r5,r6,r5
addi r3,r3,8
addi r4,r4,8
beq .Lzero
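/* The leading unaligned bytes matched: r3/r4 now point at the next
 * 8-byte-aligned addresses and r5 holds the remaining length.
 */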
.Lsameoffset_8bytes_aligned:
/* Now we are 8-byte aligned.
* Use the .Llong loop if 32 or more bytes remain to be compared.
*/
cmpdi cr6,r5,31
bgt cr6,.Llong
.Lcmp_lt32bytes:
/* compare 1 to 31 bytes; at least the r3 address is 8-byte aligned now */
cmpdi cr5,r5,7
srdi r0,r5,3
ble cr5,.Lcmp_rest_lt8bytes
/* handle 8 ~ 31 bytes */
clrldi r5,r5,61
mtctr r0
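/* CTR = r0 = number of whole doublewords (1 to 3 here); r5 = leftover bytes (0 to 7). */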
2:
LD rA,0,r3
LD rB,0,r4
cmpld cr0,rA,rB
addi r3,r3,8
addi r4,r4,8
bne cr0,.LcmpAB_lightweight
bdnz 2b
cmpwi r5,0
beq .Lzero
.Lcmp_rest_lt8bytes:
/*
* Here we have less than 8 bytes to compare. At least s1 is aligned to
* 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
* page boundary, otherwise we might read past the end of the buffer and
* trigger a page fault. We use 4K as the conservative minimum page
* size. If we detect that case we go to the byte-by-byte loop.
*
* Otherwise the next double word is loaded from s1 and s2, and shifted
* right to compare the appropriate bits.
*/
clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
cmpdi r6,0xff8
bgt .Lshort
subfic r6,r5,8
slwi r6,r6,3
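/* r6 = (8 - n) * 8: number of bits to shift out so that only the n
 * requested bytes take part in the compare.
 */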
LD rA,0,r3
LD rB,0,r4
srd rA,rA,r6
srd rB,rB,r6
cmpld cr0,rA,rB
bne cr0,.LcmpAB_lightweight
b .Lzero
.Lnon_zero:
mr r3,rC
blr
.Llong:
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
/* Try to use the VMX loop if the length is equal to or greater than 4K */
cmpldi cr6,r5,VMX_THRESH
bge cr6,.Lsameoffset_vmx_cmp
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
.Llong_novmx_cmp:
#endif
/* At least the s1 address is aligned to 8 bytes */
li off8,8
li off16,16
li off24,24
std r31,-8(r1)
std r30,-16(r1)
std r29,-24(r1)
std r28,-32(r1)
std r27,-40(r1)
srdi r0,r5,5
mtctr r0
andi. r5,r5,31
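/* ctr = number of 32-byte blocks; r5 = remaining bytes (0-31) */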
LD rA,0,r3
LD rB,0,r4
LD rC,off8,r3
LD rD,off8,r4
LD rE,off16,r3
LD rF,off16,r4
LD rG,off24,r3
LD rH,off24,r4
cmpld cr0,rA,rB
addi r3,r3,32
addi r4,r4,32
bdz .Lfirst32
LD rA,0,r3
LD rB,0,r4
cmpld cr1,rC,rD
LD rC,off8,r3
LD rD,off8,r4
cmpld cr6,rE,rF
LD rE,off16,r3
LD rF,off16,r4
cmpld cr7,rG,rH
bne cr0,.LcmpAB
LD rG,off24,r3
LD rH,off24,r4
cmpld cr0,rA,rB
bne cr1,.LcmpCD
addi r3,r3,32
addi r4,r4,32
bdz .Lsecond32
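/* 32 bytes are compared per iteration. The loop is software-pipelined:
 * loads for the next block are issued while the compare results for the
 * previous block are still pending in cr0/cr1/cr6/cr7.
 */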
.balign 16
1: LD rA,0,r3
LD rB,0,r4
cmpld cr1,rC,rD
bne cr6,.LcmpEF
LD rC,off8,r3
LD rD,off8,r4
cmpld cr6,rE,rF
bne cr7,.LcmpGH
LD rE,off16,r3
LD rF,off16,r4
cmpld cr7,rG,rH
bne cr0,.LcmpAB
LD rG,off24,r3
LD rH,off24,r4
cmpld cr0,rA,rB
bne cr1,.LcmpCD
addi r3,r3,32
addi r4,r4,32
bdnz 1b
.Lsecond32:
cmpld cr1,rC,rD
bne cr6,.LcmpEF
cmpld cr6,rE,rF
bne cr7,.LcmpGH
cmpld cr7,rG,rH
bne cr0,.LcmpAB
bne cr1,.LcmpCD
bne cr6,.LcmpEF
bne cr7,.LcmpGH
.Ltail:
ld r31,-8(r1)
ld r30,-16(r1)
ld r29,-24(r1)
ld r28,-32(r1)
ld r27,-40(r1)
cmpdi r5,0
beq .Lzero
b .Lshort
.Lfirst32:
cmpld cr1,rC,rD
cmpld cr6,rE,rF
cmpld cr7,rG,rH
bne cr0,.LcmpAB
bne cr1,.LcmpCD
bne cr6,.LcmpEF
bne cr7,.LcmpGH
b .Ltail
.LcmpAB:
li r3,1
bgt cr0,.Lout
li r3,-1
b .Lout
.LcmpCD:
li r3,1
bgt cr1,.Lout
li r3,-1
b .Lout
.LcmpEF:
li r3,1
bgt cr6,.Lout
li r3,-1
b .Lout
.LcmpGH:
li r3,1
bgt cr7,.Lout
li r3,-1
.Lout:
ld r31,-8(r1)
ld r30,-16(r1)
ld r29,-24(r1)
ld r28,-32(r1)
ld r27,-40(r1)
blr
.LcmpAB_lightweight: /* skip NV GPRS restore */
li r3,1
bgtlr
li r3,-1
blr
#ifdef CONFIG_ALTIVEC
.Lsameoffset_vmx_cmp:
/* Enter with the src/dst addrs having the same offset relative to
 * an 8-byte alignment boundary.
 *
 * There is an optimization based on the following fact: memcmp()
 * tends to fail (find a mismatch) early, within the first 32 bytes.
 * Before using VMX instructions, which bring a 32 x 128-bit VMX
 * register save/restore penalty, we compare the first 32 bytes so
 * that we can catch the ~80% of cases that fail early.
 */
li r0,4
mtctr r0
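/* 4 iterations x 8 bytes: a 32-byte pre-check */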
.Lsameoffset_prechk_32B_loop:
LD rA,0,r3
LD rB,0,r4
cmpld cr0,rA,rB
addi r3,r3,8
addi r4,r4,8
bne cr0,.LcmpAB_lightweight
addi r5,r5,-8
bdnz .Lsameoffset_prechk_32B_loop
ENTER_VMX_OPS
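/* cr1 is set by ENTER_VMX_OPS: EQ means VMX cannot be used in this
 * context, so fall back to the integer compare loop.
 */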
beq cr1,.Llong_novmx_cmp
3:
/* need to check whether r4 has the same offset as r3
 * relative to a 16-byte boundary.
 */
xor r0,r3,r4
andi. r0,r0,0xf
bne .Ldiffoffset_vmx_cmp_start
/* The length here is at least 4KB. Align s1 further, to 16 bytes:
 * if s1 is only 8-byte aligned, compare one extra double word first.
 */
andi. rA,r3,8
LD rA,0,r3
beq 4f
LD rB,0,r4
cmpld cr0,rA,rB
addi r3,r3,8
addi r4,r4,8
addi r5,r5,-8
beq cr0,4f
/* save and restore cr0 */
mfocrf r5,128
EXIT_VMX_OPS
mtocrf 128,r5
b .LcmpAB_lightweight
4:
/* compare 32 bytes in each loop iteration */
srdi r0,r5,5
mtctr r0
clrldi r5,r5,59
li off16,16
.balign 16
5:
lvx v0,0,r3
lvx v1,0,r4
VCMPEQUD_RC(v0,v0,v1)
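/* VCMPEQUD_RC sets the cr6 "all elements equal" bit; bnl cr6 branches
 * out as soon as any double word pair differs.
 */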
bnl cr6,7f
lvx v0,off16,r3
lvx v1,off16,r4
VCMPEQUD_RC(v0,v0,v1)
bnl cr6,6f
addi r3,r3,32
addi r4,r4,32
bdnz 5b
EXIT_VMX_OPS
cmpdi r5,0
beq .Lzero
b .Lcmp_lt32bytes
6:
addi r3,r3,16
addi r4,r4,16
7:
/* the differing bytes are within these 16 bytes; compare them as two double words */
EXIT_VMX_OPS
LD rA,0,r3
LD rB,0,r4
cmpld cr0,rA,rB
li off8,8
bne cr0,.LcmpAB_lightweight
LD rA,off8,r3
LD rB,off8,r4
cmpld cr0,rA,rB
bne cr0,.LcmpAB_lightweight
b .Lzero
#endif
.Ldiffoffset_8bytes_make_align_start:
/* now try to align s1 with 8 bytes */
rlwinm r6,r3,3,26,28
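/* r6 = (s1 & 0x7) << 3: bit offset of s1 within its double word */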
beq .Ldiffoffset_align_s1_8bytes
clrrdi r3,r3,3
LD rA,0,r3
LD rB,0,r4 /* unaligned load */
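/* clear the top r6 bits of rA (the bytes preceding the start of s1)
 * and shift rB right so only the first (8 - offset) bytes of s2 line
 * up with rA for the compare.
 */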
sld rA,rA,r6
srd rA,rA,r6
srd rB,rB,r6
cmpld cr0,rA,rB
srwi r6,r6,3
bne cr0,.LcmpAB_lightweight
subfic r6,r6,8
subf. r5,r6,r5
addi r3,r3,8
add r4,r4,r6
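/* advance past the (8 - offset) bytes just compared; r5 now holds the
 * remaining length.
 */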
beq .Lzero
.Ldiffoffset_align_s1_8bytes:
/* now s1 is aligned with 8 bytes. */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
/* only use VMX ops when the size is equal to or greater than 4K bytes */
cmpdi cr5,r5,VMX_THRESH
bge cr5,.Ldiffoffset_vmx_cmp
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
.Ldiffoffset_novmx_cmp:
#endif
cmpdi cr5,r5,31
ble cr5,.Lcmp_lt32bytes
#ifdef CONFIG_ALTIVEC
b .Llong_novmx_cmp
#else
b .Llong
#endif
#ifdef CONFIG_ALTIVEC
.Ldiffoffset_vmx_cmp:
/* perform a 32-byte pre-check before
 * enabling VMX operations.
 */
li r0,4
mtctr r0
.Ldiffoffset_prechk_32B_loop:
LD rA,0,r3
LD rB,0,r4
cmpld cr0,rA,rB
addi r3,r3,8
addi r4,r4,8
bne cr0,.LcmpAB_lightweight
addi r5,r5,-8
bdnz .Ldiffoffset_prechk_32B_loop
ENTER_VMX_OPS
beq cr1,.Ldiffoffset_novmx_cmp
.Ldiffoffset_vmx_cmp_start:
/* First try to align r3 to 16 bytes */
andi. r6,r3,0xf
li off16,16
beq .Ldiffoffset_vmx_s1_16bytes_align
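/* s1 is not 16-byte aligned: compare the first 16 bytes using
 * unaligned (permute-based) vector loads, then advance both pointers
 * so that s1 becomes 16-byte aligned.
 */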
LVS v3,0,r3
LVS v4,0,r4
lvx v5,0,r3
lvx v6,0,r4
LD_VSR_CROSS16B(r3,v3,v5,v7,v9)
LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
VCMPEQUB_RC(v7,v9,v10)
bnl cr6,.Ldiffoffset_vmx_diff_found
subfic r6,r6,16
subf r5,r6,r5
add r3,r3,r6
add r4,r4,r6
.Ldiffoffset_vmx_s1_16bytes_align:
/* now s1 is aligned with 16 bytes */
lvx v6,0,r4
LVS v4,0,r4
srdi r6,r5,5 /* loop for 32 bytes each */
clrldi r5,r5,59
mtctr r6
.balign 16
.Ldiffoffset_vmx_32bytesloop:
/* the first qw of r4 was saved in v6 */
lvx v9,0,r3
LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
VCMPEQUB_RC(v7,v9,v10)
vor v6,v8,v8
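/* carry the just-loaded high quadword of s2 (v8) forward in v6 for
 * the next cross-16B load.
 */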
bnl cr6,.Ldiffoffset_vmx_diff_found
addi r3,r3,16
addi r4,r4,16
lvx v9,0,r3
LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
VCMPEQUB_RC(v7,v9,v10)
vor v6,v8,v8
bnl cr6,.Ldiffoffset_vmx_diff_found
addi r3,r3,16
addi r4,r4,16
bdnz .Ldiffoffset_vmx_32bytesloop
EXIT_VMX_OPS
cmpdi r5,0
beq .Lzero
b .Lcmp_lt32bytes
.Ldiffoffset_vmx_diff_found:
EXIT_VMX_OPS
/* the difference is known to lie within the next 16 bytes */
li r5,16
b .Lcmp_lt32bytes
#endif
EXPORT_SYMBOL(memcmp)