android_kernel_xiaomi_sm8350/kernel/sched/deadline.c
Michael Bestas 635c74d37d
Merge tag 'ASB-2022-12-05_11-5.4' of https://android.googlesource.com/kernel/common into android13-5.4-lahaina
https://source.android.com/docs/security/bulletin/2022-12-01
CVE-2022-23960

* tag 'ASB-2022-12-05_11-5.4' of https://android.googlesource.com/kernel/common:
  UPSTREAM: bpf: Ensure correct locking around vulnerable function find_vpid()
  UPSTREAM: HID: roccat: Fix use-after-free in roccat_read()
  ANDROID: arm64: mm: perform clean & invalidation in __dma_map_area
  UPSTREAM: mmc: hsq: Fix data stomping during mmc recovery
  UPSTREAM: pinctrl: sunxi: Fix name for A100 R_PIO
  BACKPORT: mmc: core: Fix UHS-I SD 1.8V workaround branch
  UPSTREAM: Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm regression
  UPSTREAM: wifi: mac80211_hwsim: set virtio device ready in probe()
  BACKPORT: f2fs: don't use casefolded comparison for "." and ".."
  UPSTREAM: Revert "mm/cma.c: remove redundant cma_mutex lock"
  UPSTREAM: usb: dwc3: Try usb-role-switch first in dwc3_drd_init
  BACKPORT: usb: typec: ucsi: Fix reuse of completion structure
  BACKPORT: tipc: fix incorrect order of state message data sanity check
  UPSTREAM: net: fix up skbs delta_truesize in UDP GRO frag_list
  UPSTREAM: cgroup-v1: Correct privileges check in release_agent writes
  UPSTREAM: mm: don't try to NUMA-migrate COW pages that have other uses
  UPSTREAM: usb: raw-gadget: fix handling of dual-direction-capable endpoints
  UPSTREAM: selinux: check return value of sel_make_avc_files
  UPSTREAM: usb: musb: select GENERIC_PHY instead of depending on it
  BACKPORT: driver core: Fix error return code in really_probe()
  UPSTREAM: fscrypt: fix derivation of SipHash keys on big endian CPUs
  BACKPORT: fscrypt: rename FS_KEY_DERIVATION_NONCE_SIZE
  UPSTREAM: socionext: account for napi_gro_receive never returning GRO_DROP
  UPSTREAM: net: socionext: netsec: fix xdp stats accounting
  BACKPORT: fs: align IOCB_* flags with RWF_* flags
  UPSTREAM: efi: capsule-loader: Fix use-after-free in efi_capsule_write
  BACKPORT: ARM: 9039/1: assembler: generalize byte swapping macro into rev_l
  BACKPORT: ARM: 9035/1: uncompress: Add be32tocpu macro
  UPSTREAM: drm/meson: Fix overflow implicit truncation warnings
  UPSTREAM: irqchip/tegra: Fix overflow implicit truncation warnings
  UPSTREAM: video: fbdev: pxa3xx-gcu: Fix integer overflow in pxa3xx_gcu_write
  ANDROID: GKI: db845c: Update symbols list and ABI
  Linux 5.4.219
  wifi: mac80211: fix MBSSID parsing use-after-free
  wifi: mac80211: don't parse mbssid in assoc response
  mac80211: mlme: find auth challenge directly
  Revert "fs: check FMODE_LSEEK to control internal pipe splicing"
  Linux 5.4.218
  Input: xpad - fix wireless 360 controller breaking after suspend
  Input: xpad - add supported devices as contributed on github
  wifi: cfg80211: update hidden BSSes to avoid WARN_ON
  wifi: mac80211_hwsim: avoid mac80211 warning on bad rate
  wifi: cfg80211: avoid nontransmitted BSS list corruption
  wifi: cfg80211: fix BSS refcounting bugs
  wifi: cfg80211: ensure length byte is present before access
  wifi: cfg80211/mac80211: reject bad MBSSID elements
  wifi: cfg80211: fix u8 overflow in cfg80211_update_notlisted_nontrans()
  random: use expired timer rather than wq for mixing fast pool
  random: avoid reading two cache lines on irq randomness
  random: restore O_NONBLOCK support
  USB: serial: qcserial: add new usb-id for Dell branded EM7455
  scsi: stex: Properly zero out the passthrough command structure
  efi: Correct Macmini DMI match in uefi cert quirk
  ALSA: hda: Fix position reporting on Poulsbo
  random: clamp credited irq bits to maximum mixed
  ceph: don't truncate file in atomic_open
  nilfs2: replace WARN_ONs by nilfs_error for checkpoint acquisition failure
  nilfs2: fix leak of nilfs_root in case of writer thread creation failure
  nilfs2: fix NULL pointer dereference at nilfs_bmap_lookup_at_level()
  rpmsg: qcom: glink: replace strncpy() with strscpy_pad()
  mmc: core: Terminate infinite loop in SD-UHS voltage switch
  mmc: core: Replace with already defined values for readability
  USB: serial: ftdi_sio: fix 300 bps rate for SIO
  usb: mon: make mmapped memory read only
  arch: um: Mark the stack non-executable to fix a binutils warning
  um: Cleanup compiler warning in arch/x86/um/tls_32.c
  um: Cleanup syscall_handler_t cast in syscalls_32.h
  net/ieee802154: fix uninit value bug in dgram_sendmsg
  scsi: qedf: Fix a UAF bug in __qedf_probe()
  ARM: dts: fix Moxa SDIO 'compatible', remove 'sdhci' misnomer
  dmaengine: xilinx_dma: Report error in case of dma_set_mask_and_coherent API failure
  dmaengine: xilinx_dma: cleanup for fetching xlnx,num-fstores property
  firmware: arm_scmi: Add SCMI PM driver remove routine
  fs: fix UAF/GPF bug in nilfs_mdt_destroy
  perf tools: Fixup get_current_dir_name() compilation
  mm: pagewalk: Fix race between unmap and page walker
  Linux 5.4.217
  docs: update mediator information in CoC docs
  Makefile.extrawarn: Move -Wcast-function-type-strict to W=1
  Revert "drm/amdgpu: use dirty framebuffer helper"
  xfs: remove unused variable 'done'
  xfs: fix uninitialized variable in xfs_attr3_leaf_inactive
  xfs: streamline xfs_attr3_leaf_inactive
  xfs: move incore structures out of xfs_da_format.h
  xfs: fix memory corruption during remote attr value buffer invalidation
  xfs: refactor remote attr value buffer invalidation
  xfs: fix IOCB_NOWAIT handling in xfs_file_dio_aio_read
  xfs: fix s_maxbytes computation on 32-bit kernels
  xfs: truncate should remove all blocks, not just to the end of the page cache
  xfs: introduce XFS_MAX_FILEOFF
  xfs: fix misuse of the XFS_ATTR_INCOMPLETE flag
  x86/speculation: Add RSB VM Exit protections
  x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
  x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
  x86/speculation: Disable RRSBA behavior
  x86/bugs: Add Cannon lake to RETBleed affected CPU list
  x86/cpu/amd: Enumerate BTC_NO
  x86/common: Stamp out the stepping madness
  x86/speculation: Fill RSB on vmexit for IBRS
  KVM: VMX: Fix IBRS handling after vmexit
  KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
  KVM: VMX: Convert launched argument to flags
  KVM: VMX: Flatten __vmx_vcpu_run()
  KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw
  KVM/VMX: Use TEST %REG,%REG instead of CMP $0,%REG in vmenter.S
  x86/speculation: Remove x86_spec_ctrl_mask
  x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
  x86/speculation: Fix SPEC_CTRL write on SMT state change
  x86/speculation: Fix firmware entry SPEC_CTRL handling
  x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
  x86/speculation: Change FILL_RETURN_BUFFER to work with objtool
  intel_idle: Disable IBRS during long idle
  x86/bugs: Report Intel retbleed vulnerability
  x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
  x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
  x86/bugs: Optimize SPEC_CTRL MSR writes
  x86/entry: Add kernel IBRS implementation
  x86/entry: Remove skip_r11rcx
  x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
  x86/bugs: Add AMD retbleed= boot parameter
  x86/bugs: Report AMD retbleed vulnerability
  x86/cpufeatures: Move RETPOLINE flags to word 11
  x86/kvm/vmx: Make noinstr clean
  x86/cpu: Add a steppings field to struct x86_cpu_id
  x86/cpu: Add consistent CPU match macros
  x86/devicetable: Move x86 specific macro out of generic code
  Revert "x86/cpu: Add a steppings field to struct x86_cpu_id"
  Revert "x86/speculation: Add RSB VM Exit protections"
  Linux 5.4.216
  clk: iproc: Do not rely on node name for correct PLL setup
  clk: imx: imx6sx: remove the SET_RATE_PARENT flag for QSPI clocks
  selftests: Fix the if conditions of in test_extra_filter()
  nvme: Fix IOC_PR_CLEAR and IOC_PR_RELEASE ioctls for nvme devices
  nvme: add new line after variable declatation
  usbnet: Fix memory leak in usbnet_disconnect()
  Input: melfas_mip4 - fix return value check in mip4_probe()
  Revert "drm: bridge: analogix/dp: add panel prepare/unprepare in suspend/resume time"
  soc: sunxi: sram: Fix debugfs info for A64 SRAM C
  soc: sunxi: sram: Fix probe function ordering issues
  soc: sunxi_sram: Make use of the helper function devm_platform_ioremap_resource()
  soc: sunxi: sram: Prevent the driver from being unbound
  soc: sunxi: sram: Actually claim SRAM regions
  ARM: dts: am33xx: Fix MMCHS0 dma properties
  ARM: dts: Move am33xx and am43xx mmc nodes to sdhci-omap driver
  media: dvb_vb2: fix possible out of bound access
  mm: fix madivse_pageout mishandling on non-LRU page
  mm/migrate_device.c: flush TLB while holding PTL
  mm: prevent page_frag_alloc() from corrupting the memory
  mm/page_alloc: fix race condition between build_all_zonelists and page allocation
  mmc: moxart: fix 4-bit bus width and remove 8-bit bus width
  libata: add ATA_HORKAGE_NOLPM for Pioneer BDR-207M and BDR-205
  Revert "net: mvpp2: debugfs: fix memory leak when using debugfs_lookup()"
  ntfs: fix BUG_ON in ntfs_lookup_inode_by_name()
  ARM: dts: integrator: Tag PCI host with device_type
  clk: ingenic-tcu: Properly enable registers before accessing timers
  net: usb: qmi_wwan: Add new usb-id for Dell branded EM7455
  uas: ignore UAS for Thinkplus chips
  usb-storage: Add Hiksemi USB3-FW to IGNORE_UAS
  uas: add no-uas quirk for Hiksemi usb_disk
  Linux 5.4.215
  ext4: make directory inode spreading reflect flexbg size
  xfs: fix use-after-free when aborting corrupt attr inactivation
  xfs: fix an ABBA deadlock in xfs_rename
  xfs: don't commit sunit/swidth updates to disk if that would cause repair failures
  xfs: split the sunit parameter update into two parts
  xfs: refactor agfl length computation function
  xfs: use bitops interface for buf log item AIL flag check
  xfs: stabilize insert range start boundary to avoid COW writeback race
  xfs: fix some memory leaks in log recovery
  xfs: always log corruption errors
  xfs: constify the buffer pointer arguments to error functions
  xfs: convert EIO to EFSCORRUPTED when log contents are invalid
  xfs: Fix deadlock between AGI and AGF when target_ip exists in xfs_rename()
  xfs: attach dquots and reserve quota blocks during unwritten conversion
  xfs: range check ri_cnt when recovering log items
  xfs: add missing assert in xfs_fsmap_owner_from_rmap
  xfs: slightly tweak an assert in xfs_fs_map_blocks
  xfs: replace -EIO with -EFSCORRUPTED for corrupt metadata
  ext4: fix bug in extents parsing when eh_entries == 0 and eh_depth > 0
  workqueue: don't skip lockdep work dependency in cancel_work_sync()
  drm/rockchip: Fix return type of cdn_dp_connector_mode_valid
  drm/amd/display: Limit user regamma to a valid value
  drm/amdgpu: use dirty framebuffer helper
  Drivers: hv: Never allocate anything besides framebuffer from framebuffer memory region
  cifs: always initialize struct msghdr smb_msg completely
  usb: xhci-mtk: fix issue of out-of-bounds array access
  s390/dasd: fix Oops in dasd_alias_get_start_dev due to missing pavgroup
  serial: tegra-tcu: Use uart_xmit_advance(), fixes icount.tx accounting
  serial: tegra: Use uart_xmit_advance(), fixes icount.tx accounting
  serial: Create uart_xmit_advance()
  net: sched: fix possible refcount leak in tc_new_tfilter()
  net: sunhme: Fix packet reception for len < RX_COPY_THRESHOLD
  perf kcore_copy: Do not check /proc/modules is unchanged
  perf jit: Include program header in ELF files
  can: gs_usb: gs_can_open(): fix race dev->can.state condition
  netfilter: ebtables: fix memory leak when blob is malformed
  net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo child qdiscs
  net/sched: taprio: avoid disabling offload when it was never enabled
  of: mdio: Add of_node_put() when breaking out of for_each_xx
  i40e: Fix set max_tx_rate when it is lower than 1 Mbps
  i40e: Fix VF set max MTU size
  iavf: Fix set max MTU size with port VLAN and jumbo frames
  iavf: Fix bad page state
  MIPS: Loongson32: Fix PHY-mode being left unspecified
  MIPS: lantiq: export clk_get_io() for lantiq_wdt.ko
  net: team: Unsync device addresses on ndo_stop
  ipvlan: Fix out-of-bound bugs caused by unset skb->mac_header
  iavf: Fix cached head and tail value for iavf_get_tx_pending
  netfilter: nfnetlink_osf: fix possible bogus match in nf_osf_find()
  netfilter: nf_conntrack_irc: Tighten matching on DCC message
  netfilter: nf_conntrack_sip: fix ct_sip_walk_headers
  arm64: dts: rockchip: Remove 'enable-active-low' from rk3399-puma
  arm64: dts: rockchip: Set RK3399-Gru PCLK_EDP to 24 MHz
  arm64: dts: rockchip: Pull up wlan wake# on Gru-Bob
  mm/slub: fix to return errno if kmalloc() fails
  efi: libstub: check Shim mode using MokSBStateRT
  ALSA: hda/realtek: Enable 4-speaker output Dell Precision 5530 laptop
  ALSA: hda/realtek: Add quirk for ASUS GA503R laptop
  ALSA: hda/realtek: Add pincfg for ASUS G533Z HP jack
  ALSA: hda/realtek: Add pincfg for ASUS G513 HP jack
  ALSA: hda/realtek: Re-arrange quirk table entries
  ALSA: hda/realtek: Add quirk for Huawei WRT-WX9
  ALSA: hda: add Intel 5 Series / 3400 PCI DID
  ALSA: hda/tegra: set depop delay for tegra
  USB: serial: option: add Quectel RM520N
  USB: serial: option: add Quectel BG95 0x0203 composition
  USB: core: Fix RST error in hub.c
  Revert "usb: gadget: udc-xilinx: replace memcpy with memcpy_toio"
  Revert "usb: add quirks for Lenovo OneLink+ Dock"
  usb: cdns3: fix issue with rearming ISO OUT endpoint
  usb: gadget: udc-xilinx: replace memcpy with memcpy_toio
  usb: add quirks for Lenovo OneLink+ Dock
  tty: serial: atmel: Preserve previous USART mode if RS485 disabled
  serial: atmel: remove redundant assignment in rs485_config
  tty/serial: atmel: RS485 & ISO7816: wait for TXRDY before sending data
  wifi: mac80211: Fix UAF in ieee80211_scan_rx()
  usb: xhci-mtk: relax TT periodic bandwidth allocation
  usb: xhci-mtk: allow multiple Start-Split in a microframe
  usb: xhci-mtk: add some schedule error number
  usb: xhci-mtk: add a function to (un)load bandwidth info
  usb: xhci-mtk: use @sch_tt to check whether need do TT schedule
  usb: xhci-mtk: add only one extra CS for FS/LS INTR
  usb: xhci-mtk: get the microframe boundary for ESIT
  usb: dwc3: gadget: Avoid duplicate requests to enable Run/Stop
  usb: dwc3: gadget: Don't modify GEVNTCOUNT in pullup()
  usb: dwc3: gadget: Refactor pullup()
  usb: dwc3: gadget: Prevent repeat pullup()
  usb: dwc3: Issue core soft reset before enabling run/stop
  usb: dwc3: gadget: Avoid starting DWC3 gadget during UDC unbind
  ALSA: hda/sigmatel: Fix unused variable warning for beep power change
  cgroup: Add missing cpus_read_lock() to cgroup_attach_task_all()
  video: fbdev: pxa3xx-gcu: Fix integer overflow in pxa3xx_gcu_write
  mksysmap: Fix the mismatch of 'L0' symbols in System.map
  MIPS: OCTEON: irq: Fix octeon_irq_force_ciu_mapping()
  afs: Return -EAGAIN, not -EREMOTEIO, when a file already locked
  net: usb: qmi_wwan: add Quectel RM520N
  ALSA: hda/tegra: Align BDL entry to 4KB boundary
  ALSA: hda/sigmatel: Keep power up while beep is enabled
  rxrpc: Fix calc of resend age
  rxrpc: Fix local destruction being repeated
  regulator: pfuze100: Fix the global-out-of-bounds access in pfuze100_regulator_probe()
  ASoC: nau8824: Fix semaphore unbalance at error paths
  iomap: iomap that extends beyond EOF should be marked dirty
  MAINTAINERS: add Chandan as xfs maintainer for 5.4.y
  cifs: don't send down the destination address to sendmsg for a SOCK_STREAM
  cifs: revalidate mapping when doing direct writes
  tracing: hold caller_addr to hardirq_{enable,disable}_ip
  task_stack, x86/cea: Force-inline stack helpers
  ALSA: pcm: oss: Fix race at SNDCTL_DSP_SYNC
  parisc: ccio-dma: Add missing iounmap in error path in ccio_probe()
  drm/meson: Fix OSD1 RGB to YCbCr coefficient
  drm/meson: Correct OSD1 global alpha value
  gpio: mpc8xxx: Fix support for IRQ_TYPE_LEVEL_LOW flow_type in mpc85xx
  NFSv4: Turn off open-by-filehandle and NFS re-export for NFSv4.0
  of: fdt: fix off-by-one error in unflatten_dt_nodes()
  Revert "USB: core: Prevent nested device-reset calls"
  Revert "io_uring: disable polling pollfree files"
  Revert "netfilter: conntrack: NF_CONNTRACK_PROCFS should no longer default to y"
  Revert "sched/deadline: Fix priority inheritance with multiple scheduling classes"
  Revert "kernel/sched: Remove dl_boosted flag comment"
  Revert "mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse"
  Revert "fs: check FMODE_LSEEK to control internal pipe splicing"
  Linux 5.4.214
  tracefs: Only clobber mode/uid/gid on remount if asked
  soc: fsl: select FSL_GUTS driver for DPIO
  net: dp83822: disable rx error interrupt
  mm: Fix TLB flush for not-first PFNMAP mappings in unmap_region()
  usb: storage: Add ASUS <0x0b05:0x1932> to IGNORE_UAS
  platform/x86: acer-wmi: Acer Aspire One AOD270/Packard Bell Dot keymap fixes
  perf/arm_pmu_platform: fix tests for platform_get_irq() failure
  nvmet-tcp: fix unhandled tcp states in nvmet_tcp_state_change()
  Input: iforce - add support for Boeder Force Feedback Wheel
  ieee802154: cc2520: add rc code in cc2520_tx()
  tg3: Disable tg3 device on system reboot to avoid triggering AER
  hid: intel-ish-hid: ishtp: Fix ishtp client sending disordered message
  HID: ishtp-hid-clientHID: ishtp-hid-client: Fix comment typo
  drm/msm/rd: Fix FIFO-full deadlock
  Linux 5.4.213
  MIPS: loongson32: ls1c: Fix hang during startup
  x86/nospec: Fix i386 RSB stuffing
  sch_sfb: Also store skb len before calling child enqueue
  tcp: fix early ETIMEDOUT after spurious non-SACK RTO
  nvme-tcp: fix UAF when detecting digest errors
  RDMA/mlx5: Set local port to one when accessing counters
  ipv6: sr: fix out-of-bounds read when setting HMAC data.
  RDMA/siw: Pass a pointer to virt_to_page()
  i40e: Fix kernel crash during module removal
  tipc: fix shift wrapping bug in map_get()
  sch_sfb: Don't assume the skb is still around after enqueueing to child
  afs: Use the operation issue time instead of the reply time for callbacks
  rxrpc: Fix an insufficiently large sglist in rxkad_verify_packet_2()
  netfilter: nf_conntrack_irc: Fix forged IP logic
  netfilter: br_netfilter: Drop dst references before setting.
  RDMA/hns: Fix supported page size
  soc: brcmstb: pm-arm: Fix refcount leak and __iomem leak bugs
  RDMA/cma: Fix arguments order in net device validation
  regulator: core: Clean up on enable failure
  ARM: dts: imx6qdl-kontron-samx6i: remove duplicated node
  smb3: missing inode locks in punch hole
  cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
  cgroup: Elide write-locking threadgroup_rwsem when updating csses on an empty subtree
  cgroup: Optimize single thread migration
  scsi: lpfc: Add missing destroy_workqueue() in error path
  scsi: mpt3sas: Fix use-after-free warning
  nvmet: fix a use-after-free
  debugfs: add debugfs_lookup_and_remove()
  kprobes: Prohibit probes in gate area
  ALSA: usb-audio: Fix an out-of-bounds bug in __snd_usb_parse_audio_interface()
  ALSA: aloop: Fix random zeros in capture data when using jiffies timer
  ALSA: emu10k1: Fix out of bounds access in snd_emu10k1_pcm_channel_alloc()
  drm/amdgpu: mmVM_L2_CNTL3 register not initialized correctly
  fbdev: chipsfb: Add missing pci_disable_device() in chipsfb_pci_init()
  arm64: cacheinfo: Fix incorrect assignment of signed error value to unsigned fw_level
  parisc: Add runtime check to prevent PA2.0 kernels on PA1.x machines
  parisc: ccio-dma: Handle kmalloc failure in ccio_init_resources()
  drm/radeon: add a force flush to delay work when radeon
  drm/amdgpu: Check num_gfx_rings for gfx v9_0 rb setup.
  drm/gem: Fix GEM handle release errors
  scsi: megaraid_sas: Fix double kfree()
  USB: serial: ch341: fix disabled rx timer on older devices
  USB: serial: ch341: fix lost character on LCR updates
  usb: dwc3: disable USB core PHY management
  usb: dwc3: fix PHY disable sequence
  btrfs: harden identification of a stale device
  drm/i915/glk: ECS Liva Q2 needs GLK HDMI port timing quirk
  ALSA: seq: Fix data-race at module auto-loading
  ALSA: seq: oss: Fix data-race for max_midi_devs access
  net: mac802154: Fix a condition in the receive path
  ip: fix triggering of 'icmp redirect'
  wifi: mac80211: Don't finalize CSA in IBSS mode if state is disconnected
  driver core: Don't probe devices after bus_type.match() probe deferral
  usb: gadget: mass_storage: Fix cdrom data transfers on MAC-OS
  USB: core: Prevent nested device-reset calls
  s390: fix nospec table alignments
  s390/hugetlb: fix prepare_hugepage_range() check for 2 GB hugepages
  usb-storage: Add ignore-residue quirk for NXP PN7462AU
  USB: cdc-acm: Add Icom PMR F3400 support (0c26:0020)
  usb: dwc2: fix wrong order of phy_power_on and phy_init
  usb: typec: altmodes/displayport: correct pin assignment for UFP receptacles
  USB: serial: option: add support for Cinterion MV32-WA/WB RmNet mode
  USB: serial: option: add Quectel EM060K modem
  USB: serial: option: add support for OPPO R11 diag port
  USB: serial: cp210x: add Decagon UCA device id
  xhci: Add grace period after xHC start to prevent premature runtime suspend.
  thunderbolt: Use the actual buffer in tb_async_error()
  gpio: pca953x: Add mutex_lock for regcache sync in PM
  hwmon: (gpio-fan) Fix array out of bounds access
  clk: bcm: rpi: Fix error handling of raspberrypi_fw_get_rate
  Input: rk805-pwrkey - fix module autoloading
  clk: core: Fix runtime PM sequence in clk_core_unprepare()
  Revert "clk: core: Honor CLK_OPS_PARENT_ENABLE for clk gate ops"
  clk: core: Honor CLK_OPS_PARENT_ENABLE for clk gate ops
  drm/i915/reg: Fix spelling mistake "Unsupport" -> "Unsupported"
  usb: dwc3: qcom: fix use-after-free on runtime-PM wakeup
  binder: fix UAF of ref->proc caused by race condition
  USB: serial: ftdi_sio: add Omron CS1W-CIF31 device id
  misc: fastrpc: fix memory corruption on open
  misc: fastrpc: fix memory corruption on probe
  iio: adc: mcp3911: use correct formula for AD conversion
  Input: iforce - wake up after clearing IFORCE_XMIT_RUNNING flag
  tty: serial: lpuart: disable flow control while waiting for the transmit engine to complete
  vt: Clear selection before changing the font
  powerpc: align syscall table for ppc32
  staging: rtl8712: fix use after free bugs
  serial: fsl_lpuart: RS485 RTS polariy is inverse
  net/smc: Remove redundant refcount increase
  Revert "sch_cake: Return __NET_XMIT_STOLEN when consuming enqueued skb"
  tcp: annotate data-race around challenge_timestamp
  sch_cake: Return __NET_XMIT_STOLEN when consuming enqueued skb
  kcm: fix strp_init() order and cleanup
  ethernet: rocker: fix sleep in atomic context bug in neigh_timer_handler
  net: sched: tbf: don't call qdisc_put() while holding tree lock
  Revert "xhci: turn off port power in shutdown"
  wifi: cfg80211: debugfs: fix return type in ht40allow_map_read()
  ieee802154/adf7242: defer destroy_workqueue call
  iio: adc: mcp3911: make use of the sign bit
  platform/x86: pmc_atom: Fix SLP_TYPx bitfield mask
  drm/msm/dsi: Fix number of regulators for msm8996_dsi_cfg
  drm/msm/dsi: fix the inconsistent indenting
  net: dp83822: disable false carrier interrupt
  Revert "mm: kmemleak: take a full lowmem check in kmemleak_*_phys()"
  fs: only do a memory barrier for the first set_buffer_uptodate()
  net: mvpp2: debugfs: fix memory leak when using debugfs_lookup()
  wifi: iwlegacy: 4965: corrected fix for potential off-by-one overflow in il4965_rs_fill_link_cmd()
  efi: capsule-loader: Fix use-after-free in efi_capsule_write
  Linux 5.4.212
  net: neigh: don't call kfree_skb() under spin_lock_irqsave()
  net/af_packet: check len when min_header_len equals to 0
  io_uring: disable polling pollfree files
  kprobes: don't call disarm_kprobe() for disabled kprobes
  lib/vdso: Mark do_hres() and do_coarse() as __always_inline
  lib/vdso: Let do_coarse() return 0 to simplify the callsite
  btrfs: tree-checker: check for overlapping extent items
  netfilter: conntrack: NF_CONNTRACK_PROCFS should no longer default to y
  drm/amd/display: Fix pixel clock programming
  s390/hypfs: avoid error message under KVM
  neigh: fix possible DoS due to net iface start/stop loop
  drm/amd/display: clear optc underflow before turn off odm clock
  drm/amd/display: Avoid MPC infinite loop
  btrfs: unify lookup return value when dir entry is missing
  btrfs: do not pin logs too early during renames
  btrfs: introduce btrfs_lookup_match_dir
  mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
  bpf: Don't redirect packets with invalid pkt_len
  ftrace: Fix NULL pointer dereference in is_ftrace_trampoline when ftrace is dead
  fbdev: fb_pm2fb: Avoid potential divide by zero error
  HID: hidraw: fix memory leak in hidraw_release()
  media: pvrusb2: fix memory leak in pvr_probe
  udmabuf: Set the DMA mask for the udmabuf device (v2)
  HID: steam: Prevent NULL pointer dereference in steam_{recv,send}_report
  Bluetooth: L2CAP: Fix build errors in some archs
  kbuild: Fix include path in scripts/Makefile.modpost
  x86/bugs: Add "unknown" reporting for MMIO Stale Data
  s390/mm: do not trigger write fault when vma does not allow VM_WRITE
  mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
  scsi: storvsc: Remove WQ_MEM_RECLAIM from storvsc_error_wq
  perf/x86/intel/uncore: Fix broken read_counter() for SNB IMC PMU
  md: call __md_stop_writes in md_stop
  mm/hugetlb: fix hugetlb not supporting softdirty tracking
  ACPI: processor: Remove freq Qos request for all CPUs
  s390: fix double free of GS and RI CBs on fork() failure
  asm-generic: sections: refactor memory_intersects
  loop: Check for overflow while configuring loop
  x86/unwind/orc: Unwind ftrace trampolines with correct ORC entry
  btrfs: check if root is readonly while setting security xattr
  btrfs: add info when mount fails due to stale replace target
  btrfs: replace: drop assert for suspended replace
  btrfs: fix silent failure when deleting root reference
  ixgbe: stop resetting SYSTIME in ixgbe_ptp_start_cyclecounter
  net: Fix a data-race around sysctl_somaxconn.
  net: Fix a data-race around netdev_budget_usecs.
  net: Fix a data-race around netdev_budget.
  net: Fix a data-race around sysctl_net_busy_read.
  net: Fix a data-race around sysctl_net_busy_poll.
  net: Fix a data-race around sysctl_tstamp_allow_data.
  ratelimit: Fix data-races in ___ratelimit().
  net: Fix data-races around netdev_tstamp_prequeue.
  net: Fix data-races around weight_p and dev_weight_[rt]x_bias.
  netfilter: nft_tunnel: restrict it to netdev family
  netfilter: nft_osf: restrict osf to ipv4, ipv6 and inet families
  netfilter: nft_payload: do not truncate csum_offset and csum_type
  netfilter: nft_payload: report ERANGE for too long offset and length
  bnxt_en: fix NQ resource accounting during vf creation on 57500 chips
  netfilter: ebtables: reject blobs that don't provide all entry points
  net: ipvtap - add __init/__exit annotations to module init/exit funcs
  bonding: 802.3ad: fix no transmission of LACPDUs
  net: moxa: get rid of asymmetry in DMA mapping/unmapping
  net/mlx5e: Properly disable vlan strip on non-UL reps
  rose: check NULL rose_loopback_neigh->loopback
  SUNRPC: RPC level errors should set task->tk_rpc_status
  af_key: Do not call xfrm_probe_algs in parallel
  xfrm: fix refcount leak in __xfrm_policy_check()
  kernel/sched: Remove dl_boosted flag comment
  sched/deadline: Fix priority inheritance with multiple scheduling classes
  sched/deadline: Fix stale throttling on de-/boosted tasks
  sched/deadline: Unthrottle PI boosted threads while enqueuing
  pinctrl: amd: Don't save/restore interrupt status and wake status bits
  Revert "selftests/bpf: Fix test_align verifier log patterns"
  Revert "selftests/bpf: Fix "dubious pointer arithmetic" test"
  usb: cdns3: Fix issue for clear halt endpoint
  kernel/sys_ni: add compat entry for fadvise64_64
  parisc: Fix exception handler for fldw and fstw instructions
  audit: fix potential double free on error path from fsnotify_add_inode_mark
  Revert "USB: HCD: Fix URB giveback issue in tasklet function"
  Linux 5.4.211
  btrfs: raid56: don't trust any cached sector in __raid56_parity_recover()
  btrfs: only write the sectors in the vertical stripe which has data stripes
  can: j1939: j1939_session_destroy(): fix memory leak of skbs
  can: j1939: j1939_sk_queue_activate_next_locked(): replace WARN_ON_ONCE with netdev_warn_once()
  tracing/probes: Have kprobes and uprobes use $COMM too
  MIPS: tlbex: Explicitly compare _PAGE_NO_EXEC against 0
  video: fbdev: i740fb: Check the argument of i740_calc_vclk()
  powerpc/64: Init jump labels before parse_early_param()
  smb3: check xattr value length earlier
  f2fs: fix to avoid use f2fs_bug_on() in f2fs_new_node_page()
  ALSA: timer: Use deferred fasync helper
  ALSA: core: Add async signal helpers
  powerpc/32: Don't always pass -mcpu=powerpc to the compiler
  watchdog: export lockup_detector_reconfigure
  RISC-V: Add fast call path of crash_kexec()
  riscv: mmap with PROT_WRITE but no PROT_READ is invalid
  mips: cavium-octeon: Fix missing of_node_put() in octeon2_usb_clocks_start
  vfio: Clear the caps->buf to NULL after free
  tty: serial: Fix refcount leak bug in ucc_uart.c
  lib/list_debug.c: Detect uninitialized lists
  ext4: avoid resizing to a partial cluster size
  ext4: avoid remove directory when directory is corrupted
  drivers:md:fix a potential use-after-free bug
  nvmet-tcp: fix lockdep complaint on nvmet_tcp_wq flush during queue teardown
  dmaengine: sprd: Cleanup in .remove() after pm_runtime_get_sync() failed
  selftests/kprobe: Do not test for GRP/ without event failures
  um: add "noreboot" command line option for PANIC_TIMEOUT=-1 setups
  PCI/ACPI: Guard ARM64-specific mcfg_quirks
  cxl: Fix a memory leak in an error handling path
  gadgetfs: ep_io - wait until IRQ finishes
  scsi: lpfc: Prevent buffer overflow crashes in debugfs with malformed user input
  clk: qcom: ipq8074: dont disable gcc_sleep_clk_src
  vboxguest: Do not use devm for irq
  usb: renesas: Fix refcount leak bug
  usb: host: ohci-ppc-of: Fix refcount leak bug
  drm/meson: Fix overflow implicit truncation warnings
  irqchip/tegra: Fix overflow implicit truncation warnings
  usb: gadget: uvc: call uvc uvcg_warn on completed status instead of uvcg_info
  usb: cdns3 fix use-after-free at workaround 2
  PCI: Add ACS quirk for Broadcom BCM5750x NICs
  drm/meson: Fix refcount bugs in meson_vpu_has_available_connectors()
  locking/atomic: Make test_and_*_bit() ordered on failure
  gcc-plugins: Undefine LATENT_ENTROPY_PLUGIN when plugin disabled for a file
  igb: Add lock to avoid data race
  fec: Fix timer capture timing in `fec_ptp_enable_pps()`
  i40e: Fix to stop tx_timeout recovery if GLOBR fails
  ice: Ignore EEXIST when setting promisc mode
  net: dsa: microchip: ksz9477: fix fdb_dump last invalid entry
  net: moxa: pass pdev instead of ndev to DMA functions
  net: dsa: mv88e6060: prevent crash on an unused port
  powerpc/pci: Fix get_phb_number() locking
  netfilter: nf_tables: really skip inactive sets when allocating name
  clk: rockchip: add sclk_mac_lbtest to rk3188_critical_clocks
  iavf: Fix adminq error handling
  nios2: add force_successful_syscall_return()
  nios2: restarts apply only to the first sigframe we build...
  nios2: fix syscall restart checks
  nios2: traced syscall does need to check the syscall number
  nios2: don't leave NULLs in sys_call_table[]
  nios2: page fault et.al. are *not* restartable syscalls...
  tee: add overflow check in register_shm_helper()
  dpaa2-eth: trace the allocated address instead of page struct
  atm: idt77252: fix use-after-free bugs caused by tst_timer
  xen/xenbus: fix return type in xenbus_file_read()
  nfp: ethtool: fix the display error of `ethtool -m DEVNAME`
  NTB: ntb_tool: uninitialized heap data in tool_fn_write()
  tools build: Switch to new openssl API for test-libcrypto
  tools/vm/slabinfo: use alphabetic order when two values are equal
  dt-bindings: arm: qcom: fix MSM8916 MTP compatibles
  vsock: Set socket state back to SS_UNCONNECTED in vsock_connect_timeout()
  vsock: Fix memory leak in vsock_connect()
  plip: avoid rcu debug splat
  geneve: do not use RT_TOS for IPv6 flowlabel
  ACPI: property: Return type of acpi_add_nondev_subnodes() should be bool
  pinctrl: sunxi: Add I/O bias setting for H6 R-PIO
  pinctrl: qcom: msm8916: Allow CAMSS GP clocks to be muxed
  pinctrl: nomadik: Fix refcount leak in nmk_pinctrl_dt_subnode_to_map
  net: bgmac: Fix a BUG triggered by wrong bytes_compl
  devlink: Fix use-after-free after a failed reload
  SUNRPC: Reinitialise the backchannel request buffers before reuse
  sunrpc: fix expiry of auth creds
  can: mcp251x: Fix race condition on receive interrupt
  NFSv4/pnfs: Fix a use-after-free bug in open
  NFSv4.1: RECLAIM_COMPLETE must handle EACCES
  NFSv4: Fix races in the legacy idmapper upcall
  NFSv4.1: Handle NFS4ERR_DELAY replies to OP_SEQUENCE correctly
  NFSv4.1: Don't decrease the value of seq_nr_highest_sent
  Documentation: ACPI: EINJ: Fix obsolete example
  apparmor: Fix memleak in aa_simple_write_to_buffer()
  apparmor: fix reference count leak in aa_pivotroot()
  apparmor: fix overlapping attachment computation
  apparmor: fix aa_label_asxprint return check
  apparmor: Fix failed mount permission check error message
  apparmor: fix absroot causing audited secids to begin with =
  apparmor: fix quiet_denied for file rules
  can: ems_usb: fix clang's -Wunaligned-access warning
  tracing: Have filter accept "common_cpu" to be consistent
  btrfs: fix lost error handling when looking up extended ref on log replay
  mmc: pxamci: Fix an error handling path in pxamci_probe()
  mmc: pxamci: Fix another error handling path in pxamci_probe()
  ata: libata-eh: Add missing command name
  rds: add missing barrier to release_refill
  ALSA: info: Fix llseek return value when using callback
  net_sched: cls_route: disallow handle of 0
  net/9p: Initialize the iounit field during fid creation
  Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm regression
  Revert "net: usb: ax88179_178a needs FLAG_SEND_ZLP"
  scsi: sg: Allow waiting for commands to complete on removed device
  tcp: fix over estimation in sk_forced_mem_schedule()
  KVM: x86: Avoid theoretical NULL pointer dereference in kvm_irq_delivery_to_apic_fast()
  KVM: x86: Check lapic_in_kernel() before attempting to set a SynIC irq
  KVM: Add infrastructure and macro to mark VM as bugged
  btrfs: reject log replay if there is unsupported RO compat flag
  net_sched: cls_route: remove from list when handle is 0
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  firmware: arm_scpi: Ensure scpi_info is not assigned if the probe fails
  timekeeping: contribute wall clock to rng on time change
  ACPI: CPPC: Do not prevent CPPC from working in the future
  dm writecache: set a default MAX_WRITEBACK_JOBS
  dm thin: fix use-after-free crash in dm_sm_register_threshold_callback
  dm raid: fix address sanitizer warning in raid_status
  dm raid: fix address sanitizer warning in raid_resume
  intel_th: pci: Add Meteor Lake-P support
  intel_th: pci: Add Raptor Lake-S PCH support
  intel_th: pci: Add Raptor Lake-S CPU support
  ext4: correct the misjudgment in ext4_iget_extra_inode
  ext4: correct max_inline_xattr_value_size computing
  ext4: fix extent status tree race in writeback error recovery path
  ext4: update s_overhead_clusters in the superblock during an on-line resize
  ext4: fix use-after-free in ext4_xattr_set_entry
  ext4: make sure ext4_append() always allocates new block
  ext4: add EXT4_INODE_HAS_XATTR_SPACE macro in xattr.h
  btrfs: reset block group chunk force if we have to wait
  tpm: eventlog: Fix section mismatch for DEBUG_SECTION_MISMATCH
  kexec, KEYS, s390: Make use of built-in and secondary keyring for signature verification
  spmi: trace: fix stack-out-of-bound access in SPMI tracing functions
  x86/olpc: fix 'logical not is only applied to the left hand side'
  scsi: qla2xxx: Fix erroneous mailbox timeout after PCI error injection
  scsi: qla2xxx: Turn off multi-queue for 8G adapters
  scsi: qla2xxx: Fix discovery issues in FC-AL topology
  scsi: zfcp: Fix missing auto port scan and thus missing target ports
  video: fbdev: s3fb: Check the size of screen before memset_io()
  video: fbdev: arkfb: Check the size of screen before memset_io()
  video: fbdev: vt8623fb: Check the size of screen before memset_io()
  tools/thermal: Fix possible path truncations
  video: fbdev: arkfb: Fix a divide-by-zero bug in ark_set_pixclock()
  x86/numa: Use cpumask_available instead of hardcoded NULL check
  scripts/faddr2line: Fix vmlinux detection on arm64
  genelf: Use HAVE_LIBCRYPTO_SUPPORT, not the never defined HAVE_LIBCRYPTO
  powerpc/pci: Fix PHB numbering when using opal-phbid
  kprobes: Forbid probing on trampoline and BPF code areas
  perf symbol: Fail to read phdr workaround
  powerpc/cell/axon_msi: Fix refcount leak in setup_msi_msg_address
  powerpc/xive: Fix refcount leak in xive_get_max_prio
  powerpc/spufs: Fix refcount leak in spufs_init_isolated_loader
  powerpc/pci: Prefer PCI domain assignment via DT 'linux,pci-domain' and alias
  powerpc/32: Do not allow selection of e5500 or e6500 CPUs on PPC32
  video: fbdev: sis: fix typos in SiS_GetModeID()
  video: fbdev: amba-clcd: Fix refcount leak bugs
  watchdog: armada_37xx_wdt: check the return value of devm_ioremap() in armada_37xx_wdt_probe()
  ASoC: audio-graph-card: Add of_node_put() in fail path
  fuse: Remove the control interface for virtio-fs
  ASoC: qcom: q6dsp: Fix an off-by-one in q6adm_alloc_copp()
  s390/zcore: fix race when reading from hardware system area
  iommu/arm-smmu: qcom_iommu: Add of_node_put() when breaking out of loop
  mfd: max77620: Fix refcount leak in max77620_initialise_fps
  mfd: t7l66xb: Drop platform disable callback
  kfifo: fix kfifo_to_user() return type
  rpmsg: qcom_smd: Fix refcount leak in qcom_smd_parse_edge
  iommu/exynos: Handle failed IOMMU device registration properly
  tty: n_gsm: fix missing corner cases in gsmld_poll()
  tty: n_gsm: fix DM command
  tty: n_gsm: fix wrong T1 retry count handling
  vfio/ccw: Do not change FSM state in subchannel event
  remoteproc: qcom: wcnss: Fix handling of IRQs
  tty: n_gsm: fix race condition in gsmld_write()
  tty: n_gsm: fix packet re-transmission without open control channel
  tty: n_gsm: fix non flow control frames during mux flow off
  profiling: fix shift too large makes kernel panic
  ASoC: codecs: wcd9335: move gains from SX_TLV to S8_TLV
  ASoC: codecs: msm8916-wcd-digital: move gains from SX_TLV to S8_TLV
  serial: 8250_dw: Store LSR into lsr_saved_flags in dw8250_tx_wait_empty()
  ASoC: mediatek: mt8173-rt5650: Fix refcount leak in mt8173_rt5650_dev_probe
  ASoC: codecs: da7210: add check for i2c_add_driver
  ASoC: mt6797-mt6351: Fix refcount leak in mt6797_mt6351_dev_probe
  ASoC: mediatek: mt8173: Fix refcount leak in mt8173_rt5650_rt5676_dev_probe
  opp: Fix error check in dev_pm_opp_attach_genpd()
  jbd2: fix assertion 'jh->b_frozen_data == NULL' failure when journal aborted
  ext4: recover csum seed of tmp_inode after migrating to extents
  jbd2: fix outstanding credits assert in jbd2_journal_commit_transaction()
  null_blk: fix ida error handling in null_add_dev()
  RDMA/rxe: Fix error unwind in rxe_create_qp()
  mm/mmap.c: fix missing call to vm_unacct_memory in mmap_region
  platform/olpc: Fix uninitialized data in debugfs write
  USB: serial: fix tty-port initialized comments
  PCI: tegra194: Fix link up retry sequence
  PCI: tegra194: Fix Root Port interrupt handling
  HID: alps: Declare U1_UNICORN_LEGACY support
  mmc: cavium-thunderx: Add of_node_put() when breaking out of loop
  mmc: cavium-octeon: Add of_node_put() when breaking out of loop
  gpio: gpiolib-of: Fix refcount bugs in of_mm_gpiochip_add_data()
  RDMA/hfi1: fix potential memory leak in setup_base_ctxt()
  RDMA/siw: Fix duplicated reported IW_CM_EVENT_CONNECT_REPLY event
  RDMA/hns: Fix incorrect clearing of interrupt status register
  usb: gadget: udc: amd5536 depends on HAS_DMA
  scsi: smartpqi: Fix DMA direction for RAID requests
  mmc: sdhci-of-at91: fix set_uhs_signaling rewriting of MC1R
  memstick/ms_block: Fix a memory leak
  memstick/ms_block: Fix some incorrect memory allocation
  mmc: sdhci-of-esdhc: Fix refcount leak in esdhc_signal_voltage_switch
  staging: rtl8192u: Fix sleep in atomic context bug in dm_fsync_timer_callback
  intel_th: msu: Fix vmalloced buffers
  intel_th: msu-sink: Potential dereference of null pointer
  intel_th: Fix a resource leak in an error handling path
  soundwire: bus_type: fix remove and shutdown support
  clk: qcom: camcc-sdm845: Fix topology around titan_top power domain
  clk: qcom: ipq8074: set BRANCH_HALT_DELAY flag for UBI clocks
  clk: qcom: ipq8074: fix NSS port frequency tables
  usb: host: xhci: use snprintf() in xhci_decode_trb()
  clk: qcom: clk-krait: unlock spin after mux completion
  driver core: fix potential deadlock in __driver_attach
  misc: rtsx: Fix an error handling path in rtsx_pci_probe()
  clk: mediatek: reset: Fix written reset bit offset
  usb: xhci: tegra: Fix error check
  usb: ohci-nxp: Fix refcount leak in ohci_hcd_nxp_probe
  usb: host: Fix refcount leak in ehci_hcd_ppc_of_probe
  fpga: altera-pr-ip: fix unsigned comparison with less than zero
  mtd: st_spi_fsm: Add a clk_disable_unprepare() in .probe()'s error path
  mtd: partitions: Fix refcount leak in parse_redboot_of
  mtd: sm_ftl: Fix deadlock caused by cancel_work_sync in sm_release
  HID: cp2112: prevent a buffer overflow in cp2112_xfer()
  mtd: rawnand: meson: Fix a potential double free issue
  mtd: maps: Fix refcount leak in ap_flash_init
  mtd: maps: Fix refcount leak in of_flash_probe_versatile
  clk: renesas: r9a06g032: Fix UART clkgrp bitsel
  dccp: put dccp_qpolicy_full() and dccp_qpolicy_push() in the same lock
  net: rose: fix netdev reference changes
  netdevsim: Avoid allocation warnings triggered from user space
  iavf: Fix max_rate limiting
  crypto: inside-secure - Add missing MODULE_DEVICE_TABLE for of
  net/mlx5e: Fix the value of MLX5E_MAX_RQ_NUM_MTTS
  wifi: libertas: Fix possible refcount leak in if_usb_probe()
  wifi: iwlwifi: mvm: fix double list_add at iwl_mvm_mac_wake_tx_queue
  wifi: wil6210: debugfs: fix uninitialized variable use in `wil_write_file_wmi()`
  i2c: mux-gpmux: Add of_node_put() when breaking out of loop
  i2c: cadence: Support PEC for SMBus block read
  Bluetooth: hci_intel: Add check for platform_driver_register
  can: pch_can: pch_can_error(): initialize errc before using it
  can: error: specify the values of data[5..7] of CAN error frames
  can: usb_8dev: do not report txerr and rxerr during bus-off
  can: kvaser_usb_leaf: do not report txerr and rxerr during bus-off
  can: kvaser_usb_hydra: do not report txerr and rxerr during bus-off
  can: sun4i_can: do not report txerr and rxerr during bus-off
  can: hi311x: do not report txerr and rxerr during bus-off
  can: sja1000: do not report txerr and rxerr during bus-off
  can: rcar_can: do not report txerr and rxerr during bus-off
  can: pch_can: do not report txerr and rxerr during bus-off
  selftests/bpf: fix a test for snprintf() overflow
  wifi: p54: add missing parentheses in p54_flush()
  wifi: p54: Fix an error handling path in p54spi_probe()
  wifi: wil6210: debugfs: fix info leak in wil_write_file_wmi()
  fs: check FMODE_LSEEK to control internal pipe splicing
  selftests: timers: clocksource-switch: fix passing errors from child
  selftests: timers: valid-adjtimex: build fix for newer toolchains
  libbpf: Fix the name of a reused map
  tcp: make retransmitted SKB fit into the send window
  drm/exynos/exynos7_drm_decon: free resources when clk_set_parent() failed.
  mediatek: mt76: mac80211: Fix missing of_node_put() in mt76_led_init()
  media: platform: mtk-mdp: Fix mdp_ipi_comm structure alignment
  crypto: hisilicon - Kunpeng916 crypto driver don't sleep when in softirq
  drm/msm/mdp5: Fix global state lock backoff
  drm: bridge: sii8620: fix possible off-by-one
  drm/mediatek: dpi: Only enable dpi after the bridge is enabled
  drm/mediatek: dpi: Remove output format of YUV
  drm/rockchip: Fix an error handling path rockchip_dp_probe()
  drm/rockchip: vop: Don't crash for invalid duplicate_state()
  crypto: arm64/gcm - Select AEAD for GHASH_ARM64_CE
  drm/vc4: dsi: Correct DSI divider calculations
  drm/vc4: plane: Fix margin calculations for the right/bottom edges
  drm/vc4: plane: Remove subpixel positioning check
  media: hdpvr: fix error value returns in hdpvr_read
  drm/mcde: Fix refcount leak in mcde_dsi_bind
  drm: bridge: adv7511: Add check for mipi_dsi_driver_register
  wifi: iwlegacy: 4965: fix potential off-by-one overflow in il4965_rs_fill_link_cmd()
  ath9k: fix use-after-free in ath9k_hif_usb_rx_cb
  media: tw686x: Register the irq at the end of probe
  i2c: Fix a potential use after free
  drm: adv7511: override i2c address of cec before accessing it
  drm/mediatek: Add pull-down MIPI operation in mtk_dsi_poweroff function
  drm/radeon: fix potential buffer overflow in ni_set_mc_special_registers()
  drm/mipi-dbi: align max_chunk to 2 in spi_transfer
  wifi: rtlwifi: fix error codes in rtl_debugfs_set_write_h2c()
  ath10k: do not enforce interrupt trigger type
  dm: return early from dm_pr_call() if DM device is suspended
  thermal/tools/tmon: Include pthread and time headers in tmon.h
  nohz/full, sched/rt: Fix missed tick-reenabling bug in dequeue_task_rt()
  regulator: of: Fix refcount leak bug in of_get_regulation_constraints()
  blk-mq: don't create hctx debugfs dir until q->debugfs_dir is created
  erofs: avoid consecutive detection for Highmem memory
  arm64: dts: mt7622: fix BPI-R64 WPS button
  bus: hisi_lpc: fix missing platform_device_put() in hisi_lpc_acpi_probe()
  ARM: dts: qcom: pm8841: add required thermal-sensor-cells
  soc: qcom: aoss: Fix refcount leak in qmp_cooling_devices_register
  cpufreq: zynq: Fix refcount leak in zynq_get_revision
  ARM: OMAP2+: Fix refcount leak in omap3xxx_prm_late_init
  ARM: OMAP2+: Fix refcount leak in omapdss_init_of
  ARM: dts: qcom: mdm9615: add missing PMIC GPIO reg
  soc: fsl: guts: machine variable might be unset
  ARM: dts: ast2600-evb: fix board compatible
  ARM: dts: ast2500-evb: fix board compatible
  x86/pmem: Fix platform-device leak in error path
  ARM: bcm: Fix refcount leak in bcm_kona_smc_init
  meson-mx-socinfo: Fix refcount leak in meson_mx_socinfo_init
  ARM: findbit: fix overflowing offset
  spi: spi-rspi: Fix PIO fallback on RZ platforms
  selinux: Add boundary check in put_entry()
  PM: hibernate: defer device probing when resuming from hibernation
  ARM: shmobile: rcar-gen2: Increase refcount for new reference
  arm64: dts: allwinner: a64: orangepi-win: Fix LED node name
  arm64: dts: qcom: ipq8074: fix NAND node name
  ACPI: LPSS: Fix missing check in register_device_clock()
  ACPI: PM: save NVS memory for Lenovo G40-45
  ACPI: EC: Remove duplicate ThinkPad X1 Carbon 6th entry from DMI quirks
  ARM: OMAP2+: display: Fix refcount leak bug
  spi: synquacer: Add missing clk_disable_unprepare()
  ARM: dts: imx6ul: fix qspi node compatible
  ARM: dts: imx6ul: fix lcdif node compatible
  ARM: dts: imx6ul: fix csi node compatible
  ARM: dts: imx6ul: change operating-points to uint32-matrix
  ARM: dts: imx6ul: add missing properties for sram
  wait: Fix __wait_event_hrtimeout for RT/DL tasks
  genirq: Don't return error on missing optional irq_request_resources()
  ext2: Add more validity checks for inode counts
  arm64: fix oops in concurrently setting insn_emulation sysctls
  arm64: Do not forget syscall when starting a new thread.
  x86: Handle idle=nomwait cmdline properly for x86_idle
  epoll: autoremove wakers even more aggressively
  netfilter: nf_tables: fix null deref due to zeroed list head
  netfilter: nf_tables: do not allow RULE_ID to refer to another chain
  netfilter: nf_tables: do not allow SET_ID to refer to another table
  arm64: dts: uniphier: Fix USB interrupts for PXs3 SoC
  ARM: dts: uniphier: Fix USB interrupts for PXs2 SoC
  USB: HCD: Fix URB giveback issue in tasklet function
  coresight: Clear the connection field properly
  MIPS: cpuinfo: Fix a warning for CONFIG_CPUMASK_OFFSTACK
  powerpc/powernv: Avoid crashing if rng is NULL
  powerpc/ptdump: Fix display of RW pages on FSL_BOOK3E
  powerpc/fsl-pci: Fix Class Code of PCIe Root Port
  PCI: Add defines for normal and subtractive PCI bridges
  ia64, processor: fix -Wincompatible-pointer-types in ia64_get_irr()
  md-raid10: fix KASAN warning
  serial: mvebu-uart: uart2 error bits clearing
  fuse: limit nsec
  iio: light: isl29028: Fix the warning in isl29028_remove()
  drm/amdgpu: Check BO's requested pinning domains against its preferred_domains
  drm/nouveau: fix another off-by-one in nvbios_addr
  drm/gem: Properly annotate WW context on drm_gem_lock_reservations() error
  parisc: io_pgetevents_time64() needs compat syscall in 32-bit compat mode
  parisc: Fix device names in /proc/iomem
  ovl: drop WARN_ON() dentry is NULL in ovl_encode_fh()
  usbnet: Fix linkwatch use-after-free on disconnect
  fbcon: Fix boundary checks for fbcon=vc:n1-n2 parameters
  thermal: sysfs: Fix cooling_device_stats_setup() error code path
  fs: Add missing umask strip in vfs_tmpfile
  vfs: Check the truncate maximum size in inode_newsize_ok()
  tty: vt: initialize unicode screen buffer
  ALSA: hda/realtek: Add quirk for another Asus K42JZ model
  ALSA: hda/cirrus - support for iMac 12,1 model
  ALSA: hda/conexant: Add quirk for LENOVO 20149 Notebook model
  mm/mremap: hold the rmap lock in write mode when moving page table entries.
  KVM: x86: Set error code to segment selector on LLDT/LTR non-canonical #GP
  KVM: x86: Mark TSS busy during LTR emulation _after_ all fault checks
  KVM: nVMX: Let userspace set nVMX MSR to any _host_ supported value
  KVM: SVM: Don't BUG if userspace injects an interrupt with GIF=0
  KVM: nVMX: Snapshot pre-VM-Enter DEBUGCTL for !nested_run_pending case
  KVM: nVMX: Snapshot pre-VM-Enter BNDCFGS for !nested_run_pending case
  HID: wacom: Don't register pad_input for touch switch
  HID: wacom: Only report rotation for art pen
  add barriers to buffer_uptodate and set_buffer_uptodate
  wifi: mac80211_hwsim: use 32-bit skb cookie
  wifi: mac80211_hwsim: add back erroneously removed cast
  wifi: mac80211_hwsim: fix race condition in pending packet
  igc: Remove _I_PHY_ID checking
  ALSA: bcd2000: Fix a UAF bug on the error path of probing
  scsi: Revert "scsi: qla2xxx: Fix disk failure to rediscover"
  x86: link vdso and boot with -z noexecstack --no-warn-rwx-segments
  Makefile: link with -z noexecstack --no-warn-rwx-segments

 Conflicts:
	Documentation/devicetree/bindings/arm/qcom.yaml
	Documentation/devicetree/bindings~HEAD
	arch/x86/boot/compressed/Makefile
	drivers/mmc/core/sd.c
	drivers/rpmsg/qcom_glink_native.c
	drivers/usb/dwc3/core.c
	drivers/usb/dwc3/gadget.c
	drivers/usb/typec/ucsi/ucsi.c
	net/core/dev.c
	net/netfilter/nf_conntrack_irc.c

Change-Id: I796398110bc61fa6a8bb94f7ef41b9209683dbf7
2022-12-17 02:26:00 +02:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
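/*
 * Example (illustrative only, hypothetical numbers): a task typically
 * requests this policy from userspace with sched_setattr(2), reserving
 * some runtime out of every period:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	(10 ms of CPU time)
 *		.sched_deadline	=  30 * 1000 * 1000,	(within 30 ms)
 *		.sched_period	= 100 * 1000 * 1000,	(out of every 100 ms)
 *	};
 *	sched_setattr(0, &attr, 0);
 *
 * The CBS throttles the task if it tries to run for more than
 * sched_runtime within a period, which is what prevents it from
 * disturbing any other task.
 */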
#include "sched.h"
#include "pelt.h"
#include "walt/walt.h"
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
return container_of(dl_se, struct task_struct, dl);
}
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
return container_of(dl_rq, struct rq, dl);
}
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
struct task_struct *p = dl_task_of(dl_se);
struct rq *rq = task_rq(p);
return &rq->dl;
}
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
return !RB_EMPTY_NODE(&dl_se->rb_node);
}
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif
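/*
 * The helpers below maintain two per-rq bandwidth sums: running_bw is the
 * sum of the bandwidth of the DL tasks that are "ACTIVE" on this rq (see
 * the state diagram further down), while this_bw also counts blocked tasks
 * whose bandwidth is still charged to this rq.  running_bw should never
 * exceed this_bw, which is what the SCHED_WARN_ON() checks below assert.
 */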
static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
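/*
 * The wrappers below skip "special" entities (SCHED_FLAG_SUGOV, i.e. the
 * kthreads used by the schedutil cpufreq governor), which run under
 * SCHED_DEADLINE but do not reserve any bandwidth of their own.
 */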
static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_running_bw(dl_se->dl_bw, dl_rq);
}
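/*
 * Replace the bandwidth that a not-queued task charges to its rq:
 * drop the old p->dl.dl_bw contribution and account @new_bw instead,
 * also taking care of a still-armed inactive timer.
 */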
void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}
/*
* The utilization of a task cannot be immediately removed from
* the rq active utilization (running_bw) when the task blocks.
* Instead, we have to wait for the so called "0-lag time".
*
* If a task blocks before the "0-lag time", a timer (the inactive
* timer) is armed, and running_bw is decreased when the timer
* fires.
*
* If the task wakes up again before the inactive timer fires,
* the timer is cancelled, whereas if the task wakes up after the
* inactive timer fired (and running_bw has been decreased) the
* task's utilization has to be added to running_bw again.
* A flag in the deadline scheduling entity (dl_non_contending)
* is used to avoid race conditions between the inactive timer handler
* and task wakeups.
*
* The following diagram shows how running_bw is updated. A task is
* "ACTIVE" when its utilization contributes to running_bw; an
* "ACTIVE contending" task is in the TASK_RUNNING state, while an
* "ACTIVE non contending" task is a blocked task for which the "0-lag time"
* has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
* time already passed, which does not contribute to running_bw anymore.
* +------------------+
* wakeup | ACTIVE |
* +------------------>+ contending |
* | add_running_bw | |
* | +----+------+------+
* | | ^
* | dequeue | |
* +--------+-------+ | |
* | | t >= 0-lag | | wakeup
* | INACTIVE |<---------------+ |
* | | sub_running_bw | |
* +--------+-------+ | |
* ^ | |
* | t < 0-lag | |
* | | |
* | V |
* | +----+------+------+
* | sub_running_bw | ACTIVE |
* +-------------------+ |
* inactive timer | non contending |
* fired +------------------+
*
* The task_non_contending() function is invoked when a task
* blocks, and checks if the 0-lag time already passed or
* not (in the first case, it directly updates running_bw;
* in the second case, it arms the inactive timer).
*
* The task_contending() function is invoked when a task wakes
* up, and checks if the task is still in the "ACTIVE non contending"
* state or not (in the second case, it updates running_bw).
*/
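/*
 * Illustrative example (numbers are made up, not from the source): the
 * 0-lag time computed by task_non_contending() below is
 *
 *   0-lag = deadline - (runtime * dl_period) / dl_runtime
 *
 * For a task with dl_runtime = 10ms, dl_period = 100ms, 4ms of runtime
 * left and an absolute deadline 30ms away, the 0-lag time is
 * deadline - 4ms * 100 / 10 = deadline - 40ms, i.e. 10ms in the past:
 * running_bw is decreased right away. With only 2ms of runtime left the
 * 0-lag time would be deadline - 20ms, i.e. 10ms in the future, so the
 * inactive timer is armed for that instant instead.
 */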
static void task_non_contending(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
struct hrtimer *timer = &dl_se->inactive_timer;
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
s64 zerolag_time;
/*
* If this is a non-deadline task that has been boosted,
* do nothing
*/
if (dl_se->dl_runtime == 0)
return;
if (dl_entity_is_special(dl_se))
return;
WARN_ON(dl_se->dl_non_contending);
zerolag_time = dl_se->deadline -
div64_long((dl_se->runtime * dl_se->dl_period),
dl_se->dl_runtime);
/*
* Using relative times instead of the absolute "0-lag time"
* allows us to simplify the code
*/
zerolag_time -= rq_clock(rq);
/*
* If the "0-lag time" already passed, decrease the active
* utilization now, instead of starting a timer
*/
if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
if (dl_task(p))
sub_running_bw(dl_se, dl_rq);
if (!dl_task(p) || p->state == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (p->state == TASK_DEAD)
sub_rq_bw(&p->dl, &rq->dl);
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
__dl_clear_params(p);
raw_spin_unlock(&dl_b->lock);
}
return;
}
dl_se->dl_non_contending = 1;
get_task_struct(p);
hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
/*
* If this is a non-deadline task that has been boosted,
* do nothing
*/
if (dl_se->dl_runtime == 0)
return;
if (flags & ENQUEUE_MIGRATED)
add_rq_bw(dl_se, dl_rq);
if (dl_se->dl_non_contending) {
dl_se->dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be cancelled, inactive_task_timer()
* will see that dl_non_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
put_task_struct(dl_task_of(dl_se));
} else {
/*
* Since "dl_non_contending" is not set, the
* task's utilization has already been removed from
* active utilization (either when the task blocked,
* or when the "inactive timer" fired).
* So, add it back.
*/
add_running_bw(dl_se, dl_rq);
}
}
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
struct sched_dl_entity *dl_se = &p->dl;
return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
raw_spin_lock_init(&dl_b->dl_runtime_lock);
dl_b->dl_period = period;
dl_b->dl_runtime = runtime;
}
void init_dl_bw(struct dl_bw *dl_b)
{
raw_spin_lock_init(&dl_b->lock);
raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
if (global_rt_runtime() == RUNTIME_INF)
dl_b->bw = -1;
else
dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
dl_b->total_bw = 0;
}
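/*
 * For illustration only, assuming the default rt sysctls
 * (sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000):
 * to_ratio(period, runtime) is (runtime << BW_SHIFT) / period, so
 * dl_b->bw = (950000 << 20) / 1000000 ~= 996147, i.e. 95% of a CPU
 * expressed as a fraction of 2^20. Admission control then rejects
 * SCHED_DEADLINE parameter sets whose summed bandwidth would exceed
 * this per-CPU cap times the number of CPUs in the domain.
 */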
void init_dl_rq(struct dl_rq *dl_rq)
{
dl_rq->root = RB_ROOT_CACHED;
#ifdef CONFIG_SMP
/* zero means no -deadline tasks */
dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
dl_rq->dl_nr_migratory = 0;
dl_rq->overloaded = 0;
dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
init_dl_bw(&dl_rq->dl_bw);
#endif
dl_rq->running_bw = 0;
dl_rq->this_bw = 0;
init_dl_rq_bw_ratio(dl_rq);
}
#ifdef CONFIG_SMP
static inline int dl_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->dlo_count);
}
static inline void dl_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
/*
* Must be visible before the overload count is
* set (as in sched_rt.c).
*
* Matched by the barrier in pull_dl_task().
*/
smp_wmb();
atomic_inc(&rq->rd->dlo_count);
}
static inline void dl_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
atomic_dec(&rq->rd->dlo_count);
cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
static void update_dl_migration(struct dl_rq *dl_rq)
{
if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
if (!dl_rq->overloaded) {
dl_set_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 1;
}
} else if (dl_rq->overloaded) {
dl_clear_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 0;
}
}
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory++;
update_dl_migration(dl_rq);
}
static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory--;
update_dl_migration(dl_rq);
}
/*
* The list of pushable -deadline task is not a plist, like in
* sched_rt.c, it is an rb-tree with tasks ordered by deadline.
*/
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
struct rb_node *parent = NULL;
struct task_struct *entry;
bool leftmost = true;
BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
while (*link) {
parent = *link;
entry = rb_entry(parent, struct task_struct,
pushable_dl_tasks);
if (dl_entity_preempt(&p->dl, &entry->dl))
link = &parent->rb_left;
else {
link = &parent->rb_right;
leftmost = false;
}
}
if (leftmost)
dl_rq->earliest_dl.next = p->dl.deadline;
rb_link_node(&p->pushable_dl_tasks, parent, link);
rb_insert_color_cached(&p->pushable_dl_tasks,
&dl_rq->pushable_dl_tasks_root, leftmost);
}
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
return;
if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
struct rb_node *next_node;
next_node = rb_next(&p->pushable_dl_tasks);
if (next_node) {
dl_rq->earliest_dl.next = rb_entry(next_node,
struct task_struct, pushable_dl_tasks)->dl.deadline;
}
}
rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
}
static inline int has_pushable_dl_tasks(struct rq *rq)
{
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}
static int push_dl_task(struct rq *rq);
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
return dl_task(prev);
}
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);
static inline void deadline_queue_push_tasks(struct rq *rq)
{
if (!has_pushable_dl_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}
static inline void deadline_queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
struct rq *later_rq = NULL;
struct dl_bw *dl_b;
later_rq = find_lock_later_rq(p, rq);
if (!later_rq) {
int cpu;
/*
* If we cannot preempt any rq, fall back to pick any
* online CPU:
*/
cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable CPU.
* The task will never come back!
*/
BUG_ON(dl_bandwidth_enabled());
/*
* If admission control is disabled we
* try a little harder to let the task
* run.
*/
cpu = cpumask_any(cpu_active_mask);
}
later_rq = cpu_rq(cpu);
double_lock_balance(rq, later_rq);
}
if (p->dl.dl_non_contending || p->dl.dl_throttled) {
/*
* Inactive timer is armed (or callback is running, but
* waiting for us to release rq locks). In any case, when it
* fires (or continues), it will see the running_bw of this
* task migrated to later_rq (and correctly handle it).
*/
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
add_rq_bw(&p->dl, &later_rq->dl);
add_running_bw(&p->dl, &later_rq->dl);
} else {
sub_rq_bw(&p->dl, &rq->dl);
add_rq_bw(&p->dl, &later_rq->dl);
}
/*
* And we finally need to fixup root_domain(s) bandwidth accounting,
* since p is still hanging out in the old (now moved to default) root
* domain.
*/
dl_b = &rq->rd->dl_bw;
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
raw_spin_unlock(&dl_b->lock);
dl_b = &later_rq->rd->dl_bw;
raw_spin_lock(&dl_b->lock);
__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
raw_spin_unlock(&dl_b->lock);
set_task_cpu(p, later_rq->cpu);
double_unlock_balance(later_rq, rq);
return later_rq;
}
#else
static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
return false;
}
static inline void pull_dl_task(struct rq *rq)
{
}
static inline void deadline_queue_push_tasks(struct rq *rq)
{
}
static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
/*
* We are being explicitly informed that a new instance is starting,
* and this means that:
* - the absolute deadline of the entity has to be placed at
* current time + relative deadline;
* - the runtime of the entity has to be set to the maximum value.
*
* The capability of specifying such an event is useful whenever a -deadline
* entity wants to (try to!) synchronize its behaviour with the scheduler's
* one, and to (try to!) reconcile itself with its own scheduling
* parameters.
*/
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
WARN_ON(dl_se->dl_boosted);
WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
/*
* We are racing with the deadline timer. So, do nothing because
* the deadline timer handler will take care of properly recharging
* the runtime and postponing the deadline
*/
if (dl_se->dl_throttled)
return;
/*
* We use the regular wall clock time to set deadlines in the
* future; in fact, we must consider execution overheads (time
* spent on hardirq context, etc.).
*/
dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
dl_se->runtime = dl_se->dl_runtime;
}
/*
* Pure Earliest Deadline First (EDF) scheduling does not deal with the
* possibility of an entity lasting more than what it declared, and thus
* exhausting its runtime.
*
* Here we are interested in making runtime overrun possible, but we do
* not want an entity which is misbehaving to affect the scheduling of all
* other entities.
* Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
* is used, in order to confine each entity within its own bandwidth.
*
* This function deals exactly with that, and ensures that when the runtime
* of an entity is replenished, its deadline is also postponed. That ensures
* the overrunning entity can't interfere with other entities in the system and
* can't make them miss their deadlines. Reasons why this kind of overrun
* could happen are, typically, an entity voluntarily trying to exceed its
* runtime, or having underestimated it during sched_setattr().
*/
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
BUG_ON(pi_se->dl_runtime <= 0);
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
*/
if (dl_se->dl_deadline == 0) {
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se->runtime = 0;
/*
* We keep moving the deadline away until we get some
* available runtime for the entity. This ensures correct
* handling of situations where the runtime overrun is
* arbitrarily large.
*/
while (dl_se->runtime <= 0) {
dl_se->deadline += pi_se->dl_period;
dl_se->runtime += pi_se->dl_runtime;
}
/*
* At this point, the deadline really should be "in
* the future" with respect to rq->clock. If it's
* not, we are, for some reason, lagging too much!
* Anyway, after having warned userspace about that,
* we still try to keep the things running by
* resetting the deadline and the budget of the
* entity.
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
if (dl_se->dl_yielded)
dl_se->dl_yielded = 0;
if (dl_se->dl_throttled)
dl_se->dl_throttled = 0;
}
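/*
 * A quick, made-up example of the replenishment loop above: with
 * dl_runtime = 5ms and dl_period = 20ms, a task that ends up with
 * runtime = -12ms (after a large overrun) takes three iterations:
 * -12ms -> -7ms -> -2ms -> +3ms, while its deadline is pushed 3 periods
 * (60ms) into the future. Budget and deadline move together, so the
 * entity's bandwidth is preserved.
 */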
/*
* Here we check if --at time t-- an entity (which is probably being
* [re]activated or, in general, enqueued) can use its remaining runtime
* and its current deadline _without_ exceeding the bandwidth it is
* assigned (function returns true if it can't). We are in fact applying
* one of the CBS rules: when a task wakes up, if the residual runtime
* over residual deadline fits within the allocated bandwidth, then we
* can keep the current (absolute) deadline and residual budget without
* disrupting the schedulability of the system. Otherwise, we should
* refill the runtime and set the deadline a period in the future,
* because keeping the current (absolute) deadline of the task would
* result in breaking guarantees promised to other tasks (refer to
* Documentation/scheduler/sched-deadline.rst for more information).
*
* This function returns true if:
*
* runtime / (deadline - t) > dl_runtime / dl_deadline ,
*
* IOW we can't recycle current parameters.
*
* Notice that the bandwidth check is done against the deadline. For
* tasks with deadline equal to period this is the same as using
* dl_period instead of dl_deadline in the equation above.
*/
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se, u64 t)
{
u64 left, right;
/*
* left and right are the two sides of the equation above,
* after a bit of shuffling to use multiplications instead
* of divisions.
*
* Note that none of the time values involved in the two
* multiplications are absolute: dl_deadline and dl_runtime
* are the relative deadline and the maximum runtime of each
* instance, runtime is the runtime left for the last instance
* and (deadline - t), since t is rq->clock, is the time left
* to the (absolute) deadline. Even if overflowing the u64 type
* is very unlikely to occur in both cases, here we scale down
* as we want to avoid that risk at all. Scaling down by 10
* means that we reduce granularity to 1us. We are fine with it,
* since this is only a true/false check and, anyway, thinking
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
(pi_se->dl_runtime >> DL_SCALE);
return dl_time_before(right, left);
}
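/*
 * Worked example with illustrative numbers: take dl_runtime = 10ms and
 * dl_deadline = 100ms, so the assigned bandwidth is 0.1. A task waking
 * with 6ms of runtime left and 30ms to its old deadline would need
 * 6/30 = 0.2 of a CPU, more than it is entitled to: left = 100 * 6 and
 * right = 30 * 10 (both scaled down by DL_SCALE), right < left, the
 * function returns true and the parameters are refreshed. With only 2ms
 * left, 2/30 < 0.1, so the old deadline and budget can be reused.
 */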
/*
* Revised wakeup rule [1]: For self-suspending tasks, rather than
* re-initializing task's runtime and deadline, the revised wakeup
* rule adjusts the task's runtime to avoid the task overrunning its
* density.
*
* Reasoning: a task may overrun the density if:
* runtime / (deadline - t) > dl_runtime / dl_deadline
*
* Therefore, runtime can be adjusted to:
* runtime = (dl_runtime / dl_deadline) * (deadline - t)
*
* In such a way that the runtime will be equal to the maximum density
* the task can use without breaking any rule.
*
* [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
* bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
*/
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
u64 laxity = dl_se->deadline - rq_clock(rq);
/*
* If the task has deadline < period, and the deadline is in the past,
* it should already be throttled before this check.
*
* See update_dl_entity() comments for further details.
*/
WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
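/*
 * Illustrative numbers for the rule above: a constrained task with
 * dl_runtime = 10ms and dl_deadline = 50ms has dl_density = 0.2 (stored
 * as a fraction of 2^BW_SHIFT). If it wakes up 20ms before its old
 * deadline (laxity = 20ms), its runtime is trimmed to 0.2 * 20ms = 4ms,
 * so it cannot exceed the density it was admitted with in the remaining
 * window.
 */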
/*
* Regarding the deadline, a task with implicit deadline has a relative
* deadline == relative period. A task with constrained deadline has a
* relative deadline <= relative period.
*
* We support constrained deadline tasks. However, there are some restrictions
* applied only for tasks which do not have an implicit deadline. See
* update_dl_entity() to know more about such restrictions.
*
* The dl_is_implicit() returns true if the task has an implicit deadline.
*/
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
return dl_se->dl_deadline == dl_se->dl_period;
}
/*
* When a deadline entity is placed in the runqueue, its runtime and deadline
* might need to be updated. This is done by a CBS wake up rule. There are two
* different rules: 1) the original CBS; and 2) the Revisited CBS.
*
* When the task is starting a new period, the Original CBS is used. In this
* case, the runtime is replenished and a new absolute deadline is set.
*
* When a task is queued before the beginning of the next period, using the
* remaining runtime and deadline could make the entity overflow, see
* dl_entity_overflow() to find out more about runtime overflow. When such a
* case is detected, the runtime and deadline need to be updated.
*
* If the task has an implicit deadline, i.e., deadline == period, the Original
* CBS is applied: the runtime is replenished and a new absolute deadline is
* set, as in the previous cases.
*
* However, the Original CBS does not work properly for tasks with
* deadline < period, which are said to have a constrained deadline. By
* applying the Original CBS, a constrained deadline task would be able to run
* runtime/deadline in a period. With deadline < period, the task would
* overrun the runtime/period allowed bandwidth, breaking the admission test.
*
* In order to prevent this misbehavior, the Revisited CBS is used for
* constrained deadline tasks when a runtime overflow is detected. In the
* Revisited CBS, rather than replenishing & setting a new absolute deadline,
* the remaining runtime of the task is reduced to avoid runtime overflow.
* Please refer to the comments of the update_dl_revised_wakeup() function to
* find out more about the Revised CBS rule.
*/
static void update_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
if (unlikely(!dl_is_implicit(dl_se) &&
!dl_time_before(dl_se->deadline, rq_clock(rq)) &&
!dl_se->dl_boosted)){
update_dl_revised_wakeup(dl_se, rq);
return;
}
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
}
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
/*
* If the entity depleted all its runtime, and if we want it to sleep
* while waiting for some new execution time to become available, we
* set the bandwidth replenishment timer to the replenishment instant
* and try to activate it.
*
* Notice that it is important for the caller to know if the timer
* actually started or not (i.e., the replenishment instant is in
* the future or in the past).
*/
static int start_dl_timer(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
struct hrtimer *timer = &dl_se->dl_timer;
struct rq *rq = task_rq(p);
ktime_t now, act;
s64 delta;
lockdep_assert_held(&rq->lock);
/*
* We want the timer to fire at the deadline, but considering
* that it is actually coming from rq->clock and not from
* hrtimer's time base reading.
*/
act = ns_to_ktime(dl_next_period(dl_se));
now = hrtimer_cb_get_time(timer);
delta = ktime_to_ns(now) - rq_clock(rq);
act = ktime_add_ns(act, delta);
/*
* If the expiry time already passed, e.g., because the value
* chosen as the deadline is too small, don't even try to
* start the timer in the past!
*/
if (ktime_us_delta(act, now) < 0)
return 0;
/*
* !enqueued will guarantee another callback; even if one is already in
* progress. This ensures a balanced {get,put}_task_struct().
*
* The race against __run_timer() clearing the enqueued state is
* harmless because we're holding task_rq()->lock, therefore the timer
* expiring after we've done the check will wait on its task_rq_lock()
* and observe our state.
*/
if (!hrtimer_is_queued(timer)) {
get_task_struct(p);
hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
}
return 1;
}
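/*
 * Example with made-up values: if dl_next_period() is 1040ms on the
 * rq_clock() timeline, rq_clock() currently reads 1000ms and the
 * hrtimer base already reads 1003ms (rq_clock() is only updated at
 * certain points and may lag behind), then delta = 3ms and the timer
 * is armed for 1043ms on the hrtimer base, i.e. 40ms from now, which
 * is exactly how far the replenishment instant is from the rq clock.
 */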
/*
* This is the bandwidth enforcement timer callback. If here, we know
* a task is not on its dl_rq, since the fact that the timer was running
* means the task is throttled and needs a runtime replenishment.
*
* However, what we actually do depends on the fact the task is active,
* (it is on its rq) or has been removed from there by a call to
* dequeue_task_dl(). In the former case we must issue the runtime
* replenishment and add the task back to the dl_rq; in the latter, we just
* do nothing but clearing dl_throttled, so that runtime and deadline
* updating (and the queueing back to dl_rq) will be done by the
* next call to enqueue_task_dl().
*/
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
struct sched_dl_entity *dl_se = container_of(timer,
struct sched_dl_entity,
dl_timer);
struct task_struct *p = dl_task_of(dl_se);
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(p, &rf);
/*
* The task might have changed its scheduling policy to something
* different than SCHED_DEADLINE (through switched_from_dl()).
*/
if (!dl_task(p))
goto unlock;
/*
* The task might have been boosted by someone else and might be in the
* boosting/deboosting path; it's not throttled.
*/
if (dl_se->dl_boosted)
goto unlock;
/*
* Spurious timer due to start_dl_timer() race; or we already received
* a replenishment from rt_mutex_setprio().
*/
if (!dl_se->dl_throttled)
goto unlock;
sched_clock_tick();
update_rq_clock(rq);
/*
* If the throttle happened during sched-out; like:
*
* schedule()
* deactivate_task()
* dequeue_task_dl()
* update_curr_dl()
* start_dl_timer()
* __dequeue_task_dl()
* prev->on_rq = 0;
*
* We can be both throttled and !queued. Replenish the counter
* but do not enqueue -- wait for our wakeup to do that.
*/
if (!task_on_rq_queued(p)) {
replenish_dl_entity(dl_se, dl_se);
goto unlock;
}
#ifdef CONFIG_SMP
if (unlikely(!rq->online)) {
/*
* If the runqueue is no longer available, migrate the
* task elsewhere. This necessarily changes rq.
*/
lockdep_unpin_lock(&rq->lock, rf.cookie);
rq = dl_task_offline_migration(rq, p);
rf.cookie = lockdep_pin_lock(&rq->lock);
update_rq_clock(rq);
/*
* Now that the task has been migrated to the new RQ and we
* have that locked, proceed as normal and enqueue the task
* there.
*/
}
#endif
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0);
else
resched_curr(rq);
#ifdef CONFIG_SMP
/*
* Queueing this task back might have overloaded rq, check if we need
* to kick someone away.
*/
if (has_pushable_dl_tasks(rq)) {
/*
* Nothing relies on rq->lock after this, so it's safe to drop
* rq->lock.
*/
rq_unpin_lock(rq, &rf);
push_dl_task(rq);
rq_repin_lock(rq, &rf);
}
#endif
unlock:
task_rq_unlock(rq, p, &rf);
/*
* This can free the task_struct, including this hrtimer, do not touch
* anything related to that after this.
*/
put_task_struct(p);
return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->dl_timer;
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = dl_task_timer;
}
/*
* During the activation, CBS checks if it can reuse the current task's
* runtime and period. If the deadline of the task is in the past, CBS
* cannot use the runtime, and so it replenishes the task. This rule
* works fine for implicit deadline tasks (deadline == period), and the
* CBS was designed for implicit deadline tasks. However, a task with
* constrained deadline (deadline < period) might be awakened after the
* deadline, but before the next period. In this case, replenishing the
* task would allow it to run for runtime / deadline. As in this case
* deadline < period, CBS enables a task to run for more than the
* runtime / period. In a very loaded system, this can cause a domino
* effect, making other tasks miss their deadlines.
*
* To avoid this problem, in the activation of a constrained deadline
* task after the deadline but before the next period, throttle the
* task and set the replenishing timer to the beginning of the next period,
* unless it is boosted.
*/
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
struct task_struct *p = dl_task_of(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
return;
dl_se->dl_throttled = 1;
if (dl_se->runtime > 0)
dl_se->runtime = 0;
}
}
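/*
 * Timeline sketch with made-up parameters: dl_runtime = 5ms,
 * dl_deadline = 20ms, dl_period = 100ms. An instance starts at t = 0
 * with absolute deadline t = 20ms. If the task blocks and wakes up at
 * t = 50ms (after the deadline, before the next period), replenishing
 * here would let it consume another 5ms within a 20ms window. Instead,
 * dl_check_constrained_dl() above arms dl_timer for
 * dl_next_period() = 20 - 20 + 100 = 100ms, throttles the task and
 * zeroes its leftover runtime; the normal replenishment then happens
 * when the timer fires at t = 100ms.
 */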
static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
return (dl_se->runtime <= 0);
}
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
/*
* This function implements the GRUB accounting rule:
* according to the GRUB reclaiming algorithm, the runtime is
* not decreased as "dq = -dt", but as
* "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
* where u is the utilization of the task, Umax is the maximum reclaimable
* utilization, Uinact is the (per-runqueue) inactive utilization, computed
* as the difference between the "total runqueue utilization" and the
* runqueue active utilization, and Uextra is the (per runqueue) extra
* reclaimable utilization.
* Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
* multiplied by 2^BW_SHIFT, the result has to be shifted right by
* BW_SHIFT.
* Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
* dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
* Since delta is a 64 bit variable, to have an overflow its value
* should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
* So, overflow is not an issue here.
*/
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
u64 u_act;
u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
/*
* Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
* we compare u_inact + rq->dl.extra_bw with
* 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
* u_inact + rq->dl.extra_bw can be larger than
* 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
* leading to wrong results).
*/
if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
u_act = u_act_min;
else
u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
return (delta * u_act) >> BW_SHIFT;
}
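/*
 * Rough numeric example (fractions shown in decimal; in the code they
 * are fixed point, scaled by BW_UNIT = 2^BW_SHIFT): assume Umax = 0.95,
 * so bw_ratio ~= 1/0.95, and a task with u = 0.25, hence
 * u_act_min ~= 0.263. If u_inact = 0.10 and extra_bw = 0.05,
 * 1 - 0.10 - 0.05 = 0.85 > 0.263, so u_act = 0.85 and 10ms of wall
 * clock execution consumes 8.5ms of budget. On a lightly loaded rq
 * where u_inact + extra_bw = 0.80, the clamp kicks in and the same
 * 10ms consumes only ~2.63ms, i.e. the task reclaims the bandwidth
 * left unused by the other deadline tasks.
 */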
/*
* Update the current task's runtime statistics (provided it is still
* a -deadline task and has not been removed from the dl_rq).
*/
static void update_curr_dl(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_dl_entity *dl_se = &curr->dl;
u64 delta_exec, scaled_delta_exec;
int cpu = cpu_of(rq);
u64 now;
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
/*
* Consumed budget is computed considering the time as
* observed by schedulable tasks (excluding time spent
* in hardirq context, etc.). Deadlines are instead
* computed using hard walltime. This seems to be the more
* natural solution, but the full ramifications of this
* approach need further study.
*/
now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0)) {
if (unlikely(dl_se->dl_yielded))
goto throttle;
return;
}
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
if (dl_entity_is_special(dl_se))
return;
/*
* For tasks that participate in GRUB, we implement GRUB-PA: the
* spare reclaimed bandwidth is used to clock down frequency.
*
* For the others, we still need to scale reservation parameters
* according to current frequency and CPU maximum capacity.
*/
if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
scaled_delta_exec = grub_reclaim(delta_exec,
rq,
&curr->dl);
} else {
unsigned long scale_freq = arch_scale_freq_capacity(cpu);
unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
scaled_delta_exec = cap_scale(delta_exec, scale_freq);
scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
}
dl_se->runtime -= scaled_delta_exec;
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1;
/* If requested, inform the user about runtime overruns. */
if (dl_runtime_exceeded(dl_se) &&
(dl_se->flags & SCHED_FLAG_DL_OVERRUN))
dl_se->dl_overrun = 1;
__dequeue_task_dl(rq, curr, 0);
if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
if (!is_leftmost(curr, &rq->dl))
resched_curr(rq);
}
/*
* Because -- for now -- we share the rt bandwidth, we need to
* account our runtime there too, otherwise actual rt tasks
* would be able to exceed the shared quota.
*
* Account to the root rt group for now.
*
* The solution we're working towards is having the RT groups scheduled
* using deadline servers -- however there's a few nasties to figure
* out before that can happen.
*/
if (rt_bandwidth_enabled()) {
struct rt_rq *rt_rq = &rq->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We'll let actual RT tasks worry about the overflow here, we
* have our own CBS to keep us in line; only account when RT
* bandwidth is relevant.
*/
if (sched_rt_bandwidth_account(rt_rq))
rt_rq->rt_time += delta_exec;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
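/*
 * Illustration of the non-reclaiming path above (made-up numbers):
 * cap_scale(v, s) in the shared scheduler header is
 * (v * s) >> SCHED_CAPACITY_SHIFT, i.e. v * s/1024. On a CPU currently
 * running at half of its maximum frequency (scale_freq = 512) whose
 * maximum capacity equals the system maximum (scale_cpu = 1024), 1ms of
 * measured execution charges only 1ms * 512/1024 * 1024/1024 = 0.5ms
 * against dl_se->runtime: the budget is expressed as work at maximum
 * frequency and capacity, so a slower CPU consumes it proportionally
 * more slowly.
 */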
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
struct sched_dl_entity *dl_se = container_of(timer,
struct sched_dl_entity,
inactive_timer);
struct task_struct *p = dl_task_of(dl_se);
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(p, &rf);
sched_clock_tick();
update_rq_clock(rq);
if (!dl_task(p) || p->state == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
dl_se->dl_non_contending = 0;
}
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&dl_b->lock);
__dl_clear_params(p);
goto unlock;
}
if (dl_se->dl_non_contending == 0)
goto unlock;
sub_running_bw(dl_se, &rq->dl);
dl_se->dl_non_contending = 0;
unlock:
task_rq_unlock(rq, p, &rf);
put_task_struct(p);
return HRTIMER_NORESTART;
}
void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->inactive_timer;
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = inactive_task_timer;
}
#ifdef CONFIG_SMP
static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
struct rq *rq = rq_of_dl_rq(dl_rq);
if (dl_rq->earliest_dl.curr == 0 ||
dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
dl_rq->earliest_dl.curr = deadline;
cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
}
}
static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
struct rq *rq = rq_of_dl_rq(dl_rq);
/*
* Since we may have removed our earliest (and/or next earliest)
* task we must recompute them.
*/
if (!dl_rq->dl_nr_running) {
dl_rq->earliest_dl.curr = 0;
dl_rq->earliest_dl.next = 0;
cpudl_clear(&rq->rd->cpudl, rq->cpu);
} else {
struct rb_node *leftmost = dl_rq->root.rb_leftmost;
struct sched_dl_entity *entry;
entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
dl_rq->earliest_dl.curr = entry->deadline;
cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
}
}
#else
static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
int prio = dl_task_of(dl_se)->prio;
u64 deadline = dl_se->deadline;
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
}
static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
int prio = dl_task_of(dl_se)->prio;
WARN_ON(!dl_prio(prio));
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rb_node **link = &dl_rq->root.rb_root.rb_node;
struct rb_node *parent = NULL;
struct sched_dl_entity *entry;
int leftmost = 1;
BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
while (*link) {
parent = *link;
entry = rb_entry(parent, struct sched_dl_entity, rb_node);
if (dl_time_before(dl_se->deadline, entry->deadline))
link = &parent->rb_left;
else {
link = &parent->rb_right;
leftmost = 0;
}
}
rb_link_node(&dl_se->rb_node, parent, link);
rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
inc_dl_tasks(dl_se, dl_rq);
}
static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
if (RB_EMPTY_NODE(&dl_se->rb_node))
return;
rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
RB_CLEAR_NODE(&dl_se->rb_node);
dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se, int flags)
{
BUG_ON(on_dl_rq(dl_se));
/*
* If this is a wakeup or a new instance, the scheduling
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
*/
if (flags & ENQUEUE_WAKEUP) {
task_contending(dl_se, flags);
update_dl_entity(dl_se, pi_se);
} else if (flags & ENQUEUE_REPLENISH) {
replenish_dl_entity(dl_se, pi_se);
} else if ((flags & ENQUEUE_RESTORE) &&
dl_time_before(dl_se->deadline,
rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
setup_new_dl_entity(dl_se);
}
__enqueue_dl_entity(dl_se);
}
static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
__dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
struct task_struct *pi_task = rt_mutex_get_top_task(p);
struct sched_dl_entity *pi_se = &p->dl;
/*
* Use the scheduling parameters of the top pi-waiter task if:
* - we have a top pi-waiter which is a SCHED_DEADLINE task AND
* - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
* smaller than our deadline OR we are a !SCHED_DEADLINE task getting
* boosted due to a SCHED_DEADLINE pi-waiter).
* Otherwise we keep our runtime and deadline.
*/
if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
pi_se = &pi_task->dl;
/*
* Because of delays in the detection of the overrun of a
* thread's runtime, it might be the case that a thread
* goes to sleep in an rt mutex with negative runtime. As
* a consequence, the thread will be throttled.
*
* While waiting for the mutex, this thread can also be
* boosted via PI, resulting in a thread that is throttled
* and boosted at the same time.
*
* In this case, the boost overrides the throttle.
*/
if (p->dl.dl_throttled) {
/*
* The replenish timer needs to be canceled. No
* problem if it fires concurrently: boosted threads
* are ignored in dl_task_timer().
*/
hrtimer_try_to_cancel(&p->dl.dl_timer);
p->dl.dl_throttled = 0;
}
} else if (!dl_prio(p->normal_prio)) {
/*
* Special case in which we have a !SCHED_DEADLINE task that is going
* to be deboosted, but exceeds its runtime while doing so. No point in
* replenishing it, as it's going to return back to its original
* scheduling class after this. If it has been throttled, we need to
* clear the flag, otherwise the task may wake up as throttled after
* being boosted again with no means to replenish the runtime and clear
* the throttle.
*/
p->dl.dl_throttled = 0;
BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
return;
}
/*
* Check if a constrained deadline task was activated
* after the deadline but before the next period.
* If that is the case, the task will be throttled and
* the replenishment timer will be set to the next period.
*/
if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
dl_check_constrained_dl(&p->dl);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
add_rq_bw(&p->dl, &rq->dl);
add_running_bw(&p->dl, &rq->dl);
}
/*
* If p is throttled, we do not enqueue it. In fact, if it exhausted
* its budget it needs a replenishment and, since it now is on
* its rq, the bandwidth timer callback (which clearly has not
* run yet) will take care of this.
* However, the active utilization does not depend on the fact
* that the task is on the runqueue or not (but depends on the
* task's state - in GRUB parlance, "inactive" vs "active contending").
* In other words, even if a task is throttled its utilization must
* be counted in the active utilization; hence, we need to call
* add_running_bw().
*/
if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
if (flags & ENQUEUE_WAKEUP)
task_contending(&p->dl, flags);
return;
}
enqueue_dl_entity(&p->dl, pi_se, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
dequeue_dl_entity(&p->dl);
dequeue_pushable_dl_task(rq, p);
}
static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
__dequeue_task_dl(rq, p, flags);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
}
/*
* This check allows to start the inactive timer (or to immediately
* decrease the active utilization, if needed) in two cases:
* when the task blocks and when it is terminating
* (p->state == TASK_DEAD). We can handle the two cases in the same
* way, because from GRUB's point of view the same thing is happening
* (the task moves from "active contending" to "active non contending"
* or "inactive")
*/
if (flags & DEQUEUE_SLEEP)
task_non_contending(p);
}
/*
* Yield task semantic for -deadline tasks is:
*
* get off from the CPU until our next instance, with
* a new runtime. This is of little use now, since we
* don't have a bandwidth reclaiming mechanism. Anyway,
* bandwidth reclaiming is planned for the future, and
* yield_task_dl will indicate that some spare budget
* is available for other task instances to use it.
*/
static void yield_task_dl(struct rq *rq)
{
/*
* We make the task go to sleep until its current deadline by
* forcing its runtime to zero. This way, update_curr_dl() stops
* it and the bandwidth timer will wake it up and will give it
* new scheduling parameters (thanks to dl_yielded=1).
*/
rq->curr->dl.dl_yielded = 1;
update_rq_clock(rq);
update_curr_dl(rq);
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
rq_clock_skip_update(rq);
}
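/*
 * A minimal userspace sketch of the semantic above (do_periodic_work()
 * is a hypothetical helper, not part of the kernel):
 *
 *	for (;;) {
 *		do_periodic_work();	// ideally < dl_runtime of work
 *		sched_yield();		// drop the leftover budget; the
 *					// bandwidth timer wakes us with a
 *					// fresh budget at the next
 *					// replenishment instant
 *	}
 *
 * i.e. sched_yield() lets a SCHED_DEADLINE task tell the scheduler it
 * is done with the current instance.
 */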
#ifdef CONFIG_SMP
static int find_later_rq(struct task_struct *task);
static int
#ifdef CONFIG_SCHED_WALT
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags,
int sibling_count_hint)
#else
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
#endif
{
struct task_struct *curr;
struct rq *rq;
if (sd_flag != SD_BALANCE_WAKE)
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
/*
* If we are dealing with a -deadline task, we must
* decide where to wake it up.
* If it has a later deadline and the current task
* on this rq can't move (provided the waking task
* can!) we prefer to send it somewhere else. On the
* other hand, if it has a shorter deadline, we
* try to make it stay here, it might be important.
*/
if (unlikely(dl_task(curr)) &&
(curr->nr_cpus_allowed < 2 ||
!dl_entity_preempt(&p->dl, &curr->dl)) &&
(p->nr_cpus_allowed > 1)) {
int target = find_later_rq(p);
if (target != -1 &&
(dl_time_before(p->dl.deadline,
cpu_rq(target)->dl.earliest_dl.curr) ||
(cpu_rq(target)->dl.dl_nr_running == 0)))
cpu = target;
}
rcu_read_unlock();
out:
return cpu;
}
static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
struct rq *rq;
if (p->state != TASK_WAKING)
return;
rq = task_rq(p);
/*
* Since p->state == TASK_WAKING, set_task_cpu() has been called
* from try_to_wake_up(). Hence, p->pi_lock is locked, but
* rq->lock is not... So, lock it
*/
raw_spin_lock(&rq->lock);
if (p->dl.dl_non_contending) {
update_rq_clock(rq);
sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be cancelled, inactive_task_timer()
* will see that dl_non_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
sub_rq_bw(&p->dl, &rq->dl);
raw_spin_unlock(&rq->lock);
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
!cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
cpudl_find(&rq->rd->cpudl, p, NULL))
return;
resched_curr(rq);
}
static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
* disabled avoiding further scheduler activity on it and we've
* not yet started the picking loop.
*/
rq_unpin_lock(rq, rf);
pull_dl_task(rq);
rq_repin_lock(rq, rf);
}
return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
#endif /* CONFIG_SMP */
/*
* Only called when both the current and waking task are -deadline
* tasks.
*/
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
int flags)
{
if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
resched_curr(rq);
return;
}
#ifdef CONFIG_SMP
/*
* In the unlikely case current and p have the same deadline
* let us try to decide what's the best thing to do...
*/
if ((p->dl.deadline == rq->curr->dl.deadline) &&
!test_tsk_need_resched(rq->curr))
check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif
static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
p->se.exec_start = rq_clock_task(rq);
/* You can't push away the running task */
dequeue_pushable_dl_task(rq, p);
if (!first)
return;
if (hrtick_enabled(rq))
start_hrtick_dl(rq, p);
if (rq->curr->sched_class != &dl_sched_class)
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
deadline_queue_push_tasks(rq);
}
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
struct dl_rq *dl_rq)
{
struct rb_node *left = rb_first_cached(&dl_rq->root);
if (!left)
return NULL;
return rb_entry(left, struct sched_dl_entity, rb_node);
}
static struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct sched_dl_entity *dl_se;
struct dl_rq *dl_rq = &rq->dl;
struct task_struct *p;
WARN_ON_ONCE(prev || rf);
if (!sched_dl_runnable(rq))
return NULL;
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
set_next_task_dl(rq, p, true);
return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
/*
* Even when we have runtime, update_curr_dl() might have resulted in us
* not being the leftmost task anymore. In that case NEED_RESCHED will
* be set and schedule() will start a new hrtick for the next task.
*/
if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
is_leftmost(p, &rq->dl))
start_hrtick_dl(rq, p);
}
static void task_fork_dl(struct task_struct *p)
{
/*
* SCHED_DEADLINE tasks cannot fork and this is achieved through
* sched_fork()
*/
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define DL_MAX_TRIES 3
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, p->cpus_ptr))
return 1;
return 0;
}
/*
* Return the earliest pushable rq's task, which is suitable to be executed
* on the CPU, NULL otherwise:
*/
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
struct task_struct *p = NULL;
if (!has_pushable_dl_tasks(rq))
return NULL;
next_node:
if (next_node) {
p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
if (pick_dl_task(rq, p, cpu))
return p;
next_node = rb_next(next_node);
goto next_node;
}
return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
/* Make sure the mask is initialized first */
if (unlikely(!later_mask))
return -1;
if (task->nr_cpus_allowed == 1)
return -1;
/*
* We have to consider system topology and task affinity
* first, then we can look for a suitable CPU.
*/
if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
return -1;
/*
* If we are here, some targets have been found, including
* the most suitable which is, among the runqueues where the
* current tasks have later deadlines than the task's one, the
* rq with the latest possible one.
*
* Now we check how well this matches with task's
* affinity and system topology.
*
* The last CPU where the task ran is our first
* guess, since it is most likely cache-hot there.
*/
if (cpumask_test_cpu(cpu, later_mask))
return cpu;
/*
* Check if this_cpu is to be skipped (i.e., it is
* not in the mask) or not.
*/
if (!cpumask_test_cpu(this_cpu, later_mask))
this_cpu = -1;
rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
/*
* If possible, preempting this_cpu is
* cheaper than migrating.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return this_cpu;
}
best_cpu = cpumask_first_and(later_mask,
sched_domain_span(sd));
/*
* Last chance: if a CPU that is in both later_mask
* and the current sd span is valid, that becomes our
* choice. Of course, the latest possible CPU is
* already under consideration through later_mask.
*/
if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
}
}
}
rcu_read_unlock();
/*
* At this point, all our guesses failed, we just return
* 'something', and let the caller sort the things out.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any(later_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
struct rq *later_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < DL_MAX_TRIES; tries++) {
cpu = find_later_rq(task);
if ((cpu == -1) || (cpu == rq->cpu))
break;
later_rq = cpu_rq(cpu);
if (later_rq->dl.dl_nr_running &&
!dl_time_before(task->dl.deadline,
later_rq->dl.earliest_dl.curr)) {
/*
* Target rq has tasks of equal or earlier deadline,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
later_rq = NULL;
break;
}
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
task_running(rq, task) ||
!dl_task(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;
break;
}
}
/*
* If the rq we found has no -deadline task, or
* its earliest one has a later deadline than our
* task, the rq is a good one.
*/
if (!later_rq->dl.dl_nr_running ||
dl_time_before(task->dl.deadline,
later_rq->dl.earliest_dl.curr))
break;
/* Otherwise we try again. */
double_unlock_balance(rq, later_rq);
later_rq = NULL;
}
return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_dl_tasks(rq))
return NULL;
p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
struct task_struct, pushable_dl_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!dl_task(p));
return p;
}
/*
* See if the non running -deadline tasks on this rq
* can be sent to some other CPU where they can preempt
* and start executing.
*/
static int push_dl_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *later_rq;
int ret = 0;
if (!rq->dl.overloaded)
return 0;
next_task = pick_next_pushable_dl_task(rq);
if (!next_task)
return 0;
retry:
if (WARN_ON(next_task == rq->curr))
return 0;
/*
* If next_task preempts rq->curr, and rq->curr
* can move away, it makes sense to just reschedule
* without going further in pushing next_task.
*/
if (dl_task(rq->curr) &&
dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
rq->curr->nr_cpus_allowed > 1) {
resched_curr(rq);
return 0;
}
/* We might release rq lock */
get_task_struct(next_task);
/* Will lock the rq it'll find */
later_rq = find_lock_later_rq(next_task, rq);
if (!later_rq) {
struct task_struct *task;
/*
* We must check all this again, since
* find_lock_later_rq releases rq->lock and it is
* then possible that next_task has migrated.
*/
task = pick_next_pushable_dl_task(rq);
if (task == next_task) {
/*
* The task is still there. We don't try
* again, some other CPU will pull it when ready.
*/
goto out;
}
if (!task)
/* No more tasks */
goto out;
put_task_struct(next_task);
next_task = task;
goto retry;
}
deactivate_task(rq, next_task, 0);
next_task->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(next_task, later_rq->cpu);
next_task->on_rq = TASK_ON_RQ_QUEUED;
/*
* Update the later_rq clock here, because the clock is used
* by the cpufreq_update_util() inside __add_running_bw().
*/
update_rq_clock(later_rq);
activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
ret = 1;
resched_curr(later_rq);
double_unlock_balance(rq, later_rq);
out:
put_task_struct(next_task);
return ret;
}
static void push_dl_tasks(struct rq *rq)
{
/* push_dl_task() will return true if it moved a -deadline task */
while (push_dl_task(rq))
;
}
static void pull_dl_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
struct task_struct *p;
bool resched = false;
struct rq *src_rq;
u64 dmin = LONG_MAX;
if (likely(!dl_overloaded(this_rq)))
return;
/*
* Match the barrier from dl_set_overload(); this guarantees that if we
* see overloaded we must also see the dlo_mask bit.
*/
smp_rmb();
for_each_cpu(cpu, this_rq->rd->dlo_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* It looks racy, and it is! However, as in sched_rt.c,
* we are fine with this.
*/
if (this_rq->dl.dl_nr_running &&
dl_time_before(this_rq->dl.earliest_dl.curr,
src_rq->dl.earliest_dl.next))
continue;
/* Might drop this_rq->lock */
double_lock_balance(this_rq, src_rq);
/*
* If there are no more pullable tasks on the
* rq, we're done with it.
*/
if (src_rq->dl.dl_nr_running <= 1)
goto skip;
p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
/*
* We found a task to be pulled if:
* - it preempts our current (if there's one),
* - it will preempt the last one we pulled (if any).
*/
if (p && dl_time_before(p->dl.deadline, dmin) &&
(!this_rq->dl.dl_nr_running ||
dl_time_before(p->dl.deadline,
this_rq->dl.earliest_dl.curr))) {
WARN_ON(p == src_rq->curr);
WARN_ON(!task_on_rq_queued(p));
/*
* If p has an earlier deadline than the task currently
* running on src_rq, it is most likely just waking up and
* about to run there; in that case, leave it where it is.
*/
if (dl_time_before(p->dl.deadline,
src_rq->curr->dl.deadline))
goto skip;
resched = true;
deactivate_task(src_rq, p, 0);
p->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(p, this_cpu);
p->on_rq = TASK_ON_RQ_QUEUED;
activate_task(this_rq, p, 0);
dmin = p->dl.deadline;
/* Is there any other task even earlier? */
}
skip:
double_unlock_balance(this_rq, src_rq);
}
if (resched)
resched_curr(this_rq);
}
/*
* Since the task is not running and a reschedule is not going to happen
* anytime soon on its runqueue, we try pushing it away now.
*/
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
p->nr_cpus_allowed > 1 &&
dl_task(rq->curr) &&
(rq->curr->nr_cpus_allowed < 2 ||
!dl_entity_preempt(&p->dl, &rq->curr->dl))) {
push_dl_tasks(rq);
}
}
static void set_cpus_allowed_dl(struct task_struct *p,
const struct cpumask *new_mask)
{
struct root_domain *src_rd;
struct rq *rq;
BUG_ON(!dl_task(p));
rq = task_rq(p);
src_rd = rq->rd;
/*
* Migrating a SCHED_DEADLINE task between exclusive
* cpusets (different root_domains) entails a bandwidth
* update. We already made space for us in the destination
* domain (see cpuset_can_attach()).
*/
if (!cpumask_intersects(src_rd->span, new_mask)) {
struct dl_bw *src_dl_b;
src_dl_b = dl_bw_of(cpu_of(rq));
/*
* We now free resources of the root_domain we are migrating
* off. In the worst case, sched_setattr() may temporarily fail
* until we complete the update.
*/
raw_spin_lock(&src_dl_b->lock);
__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&src_dl_b->lock);
}
set_cpus_allowed_common(p, new_mask);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_set_overload(rq);
cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
if (rq->dl.dl_nr_running > 0)
cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_clear_overload(rq);
cpudl_clear(&rq->rd->cpudl, rq->cpu);
cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
void __init init_sched_dl_class(void)
{
unsigned int i;
for_each_possible_cpu(i)
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
GFP_KERNEL, cpu_to_node(i));
}
void dl_add_task_root_domain(struct task_struct *p)
{
struct rq_flags rf;
struct rq *rq;
struct dl_bw *dl_b;
rq = task_rq_lock(p, &rf);
if (!dl_task(p))
goto unlock;
dl_b = &rq->rd->dl_bw;
raw_spin_lock(&dl_b->lock);
__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
raw_spin_unlock(&dl_b->lock);
unlock:
task_rq_unlock(rq, p, &rf);
}
void dl_clear_root_domain(struct root_domain *rd)
{
unsigned long flags;
raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
rd->dl_bw.total_bw = 0;
raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
}
#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
/*
* task_non_contending() can start the "inactive timer" (if the 0-lag
* time is in the future). If the task switches back to dl before
* the "inactive timer" fires, it can continue to consume its current
* runtime using its current deadline. If it stays outside of
* SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
* will reset the task parameters.
*/
if (task_on_rq_queued(p) && p->dl.dl_runtime)
task_non_contending(p);
if (!task_on_rq_queued(p)) {
/*
* Inactive timer is armed. However, p is leaving DEADLINE and
* might migrate away from this rq while continuing to run on
* some other class. We need to remove its contribution from
* this rq running_bw now, or sub_rq_bw (below) will complain.
*/
if (p->dl.dl_non_contending)
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
}
/*
* We cannot use inactive_task_timer() to invoke sub_running_bw()
* at the 0-lag time, because the task could have been migrated
* in the meantime while running as SCHED_OTHER.
*/
if (p->dl.dl_non_contending)
p->dl.dl_non_contending = 0;
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
* from an overloaded CPU, if any.
*/
if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
return;
deadline_queue_pull_task(rq);
}
/*
* When switching to -deadline, we may overload the rq, so we
* try to push some other task away, if possible.
*/
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
add_rq_bw(&p->dl, &rq->dl);
return;
}
if (rq->curr != p) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
deadline_queue_push_tasks(rq);
#endif
if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0);
else
resched_curr(rq);
} else {
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
}
}
/*
* If the scheduling parameters of a -deadline task changed,
* a push or pull operation might be needed.
*/
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
int oldprio)
{
if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
/*
* This might be too much, but unfortunately
* we don't have the old deadline value, and
* we can't tell whether the task is raising
* or lowering its priority, so...
*/
if (!rq->dl.overloaded)
deadline_queue_pull_task(rq);
/*
* If we now have an earlier deadline task than p,
* then reschedule, provided p is still on this
* runqueue.
*/
if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
resched_curr(rq);
#else
/*
* Again, we don't know if p has an earlier
* or later deadline, so let's blindly set a
* (maybe not needed) rescheduling point.
*/
resched_curr(rq);
#endif /* CONFIG_SMP */
}
}
const struct sched_class dl_sched_class = {
.next = &rt_sched_class,
.enqueue_task = enqueue_task_dl,
.dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl,
.check_preempt_curr = check_preempt_curr_dl,
.pick_next_task = pick_next_task_dl,
.put_prev_task = put_prev_task_dl,
.set_next_task = set_next_task_dl,
#ifdef CONFIG_SMP
.balance = balance_dl,
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,
#endif
.task_tick = task_tick_dl,
.task_fork = task_fork_dl,
.prio_changed = prio_changed_dl,
.switched_from = switched_from_dl,
.switched_to = switched_to_dl,
.update_curr = update_curr_dl,
};
int sched_dl_global_validate(void)
{
u64 runtime = global_rt_runtime();
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
struct dl_bw *dl_b;
int cpu, cpus, ret = 0;
unsigned long flags;
/*
* Here we want to check that the bandwidth is not being set to a
* value smaller than the bandwidth currently allocated in any of
* the root_domains.
*
* FIXME: Cycling over all the CPUs is overkill, but simpler than
* cycling over root_domains... Discussion on different/better
* solutions is welcome!
*/
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
cpus = dl_bw_cpus(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
if (new_bw * cpus < dl_b->total_bw)
ret = -EBUSY;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
if (ret)
break;
}
return ret;
}
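/*
* A worked example of the check above, assuming the usual helpers
* (to_ratio() returning (runtime << BW_SHIFT) / period, BW_SHIFT == 20)
* and the default sysctls sched_rt_runtime_us = 950000 and
* sched_rt_period_us = 1000000:
*
*     new_bw = (950000000ULL << 20) / 1000000000 = 996147   (~0.95)
*
* On a 4-CPU root_domain the admissible total is then
* new_bw * cpus = 3984588; if the tasks already admitted in that
* root_domain sum to more than this (dl_b->total_bw), lowering the
* sysctl is refused with -EBUSY.
*/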
void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
if (global_rt_runtime() == RUNTIME_INF) {
dl_rq->bw_ratio = 1 << RATIO_SHIFT;
dl_rq->extra_bw = 1 << BW_SHIFT;
} else {
dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
dl_rq->extra_bw = to_ratio(global_rt_period(),
global_rt_runtime());
}
}
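/*
* With the same default sysctls, and assuming BW_SHIFT == 20 and
* RATIO_SHIFT == 8, the values computed above come out roughly as:
*
*     dl_rq->extra_bw = to_ratio(1000000000, 950000000)       = 996147
*                       (the per-CPU cap, ~0.95 in fixed point)
*     dl_rq->bw_ratio = to_ratio(950000000, 1000000000) >> 12 = 269
*                       (~1/0.95 scaled by 2^8, used by GRUB reclaiming)
*
* i.e. extra_bw is Umax and bw_ratio is its reciprocal in a narrower
* fixed-point format.
*/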
void sched_dl_do_global(void)
{
u64 new_bw = -1;
struct dl_bw *dl_b;
int cpu;
unsigned long flags;
def_dl_bandwidth.dl_period = global_rt_period();
def_dl_bandwidth.dl_runtime = global_rt_runtime();
if (global_rt_runtime() != RUNTIME_INF)
new_bw = to_ratio(global_rt_period(), global_rt_runtime());
/*
* FIXME: As above...
*/
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b->bw = new_bw;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
}
}
/*
* We must be sure that accepting a new task (or allowing changing the
* parameters of an existing one) is consistent with the bandwidth
* constraints. If so, this function also updates the currently allocated
* bandwidth to reflect the new situation.
*
* This function is called while holding p's rq->lock.
*/
int sched_dl_overflow(struct task_struct *p, int policy,
const struct sched_attr *attr)
{
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
u64 period = attr->sched_period ?: attr->sched_deadline;
u64 runtime = attr->sched_runtime;
u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
int cpus, err = -1;
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return 0;
/* !deadline task may carry old deadline bandwidth */
if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
return 0;
/*
* Whether a task enters, leaves, or stays -deadline but changes
* its parameters, we may need to update the total allocated
* bandwidth of its container accordingly.
*/
raw_spin_lock(&dl_b->lock);
cpus = dl_bw_cpus(task_cpu(p));
if (dl_policy(policy) && !task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cpus, 0, new_bw)) {
if (hrtimer_active(&p->dl.inactive_timer))
__dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
err = 0;
} else if (dl_policy(policy) && task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
/*
* XXX this is slightly incorrect: when the task's
* utilization decreases, we should delay the total
* utilization change until the task's 0-lag point.
* But this would require setting the task's "inactive
* timer" even when the task is not inactive.
*/
__dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
dl_change_utilization(p, new_bw);
err = 0;
} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
/*
* Do not decrease the total deadline utilization here,
* switched_from_dl() will take care to do it at the correct
* (0-lag) time.
*/
err = 0;
}
raw_spin_unlock(&dl_b->lock);
return err;
}
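/*
* A small admission-control example for the code above, assuming the
* __dl_overflow() helper in sched.h implements the classic test
* dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw (numbers below are
* purely illustrative): on a 4-CPU root_domain with dl_b->bw = 996147
* (95%), the budget is 3984588. If three tasks of ~10% each are already
* admitted (total_bw = 314571), a new task asking for 50%
* (new_bw = 524288) still fits, since 314571 + 524288 <= 3984588, so
* __dl_add() records it and 0 is returned to the caller.
*/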
/*
* This function initializes the sched_dl_entity of a task that is newly
* becoming SCHED_DEADLINE.
*
* Only the static values are considered here; the actual runtime and the
* absolute deadline will be properly calculated when the task is enqueued
* for the first time with its new policy.
*/
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
dl_se->dl_runtime = attr->sched_runtime;
dl_se->dl_deadline = attr->sched_deadline;
dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
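/*
* For instance (illustrative numbers, assuming BW_SHIFT == 20), a task
* asking for runtime = 10ms, deadline = 30ms, period = 100ms ends up with:
*
*     dl_bw      = to_ratio(100000000, 10000000) = 104857  (~10% of a CPU)
*     dl_density = to_ratio(30000000, 10000000)  = 349525  (~33%)
*
* dl_bw is the utilization charged against the root_domain in
* sched_dl_overflow(), while dl_density (runtime/deadline in the same
* fixed point) is what the wakeup-time rule uses to decide whether the
* remaining runtime can still be served by the old deadline.
*/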
void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
attr->sched_priority = p->rt_priority;
attr->sched_runtime = dl_se->dl_runtime;
attr->sched_deadline = dl_se->dl_deadline;
attr->sched_period = dl_se->dl_period;
attr->sched_flags &= ~SCHED_DL_FLAGS;
attr->sched_flags |= dl_se->flags;
}
/*
* This function validates the new parameters of a -deadline task.
* We require the deadline to be non-zero and greater than or equal to
* the runtime, and the period to be either zero or greater than or
* equal to the deadline. Furthermore, we have to be sure that user
* parameters are above the internal resolution of 1us (we check
* sched_runtime only, since it is always the smallest of the three) and
* below 2^63 ns (we have to check both sched_deadline and sched_period,
* as the latter can be zero).
*/
bool __checkparam_dl(const struct sched_attr *attr)
{
/* special dl tasks don't actually use any parameter */
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return true;
/* deadline != 0 */
if (attr->sched_deadline == 0)
return false;
/*
* Since we truncate DL_SCALE bits, make sure we're at least
* that big.
*/
if (attr->sched_runtime < (1ULL << DL_SCALE))
return false;
/*
* Since we use the MSB for wrap-around and sign issues, make
* sure it's not set (mind that period can be equal to zero).
*/
if (attr->sched_deadline & (1ULL << 63) ||
attr->sched_period & (1ULL << 63))
return false;
/* runtime <= deadline <= period (if period != 0) */
if ((attr->sched_period != 0 &&
attr->sched_period < attr->sched_deadline) ||
attr->sched_deadline < attr->sched_runtime)
return false;
return true;
}
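/*
* From userspace, these constraints are what a sched_setattr(2) call has
* to satisfy. A minimal sketch (illustrative only; the syscall is invoked
* directly via syscall(2) here):
*
*     struct sched_attr attr = {
*             .size           = sizeof(attr),
*             .sched_policy   = SCHED_DEADLINE,
*             .sched_runtime  = 10 * 1000 * 1000,    // 10ms
*             .sched_deadline = 30 * 1000 * 1000,    // 30ms
*             .sched_period   = 100 * 1000 * 1000,   // 100ms
*     };
*     if (syscall(SYS_sched_setattr, 0, &attr, 0))
*             perror("sched_setattr");
*
* runtime <= deadline <= period holds, runtime is well above 1 << DL_SCALE,
* and neither deadline nor period has bit 63 set, so __checkparam_dl()
* accepts it (subject to the bandwidth test in sched_dl_overflow()).
*/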
/*
* This function clears the sched_dl_entity static params.
*/
void __dl_clear_params(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
dl_se->dl_runtime = 0;
dl_se->dl_deadline = 0;
dl_se->dl_period = 0;
dl_se->flags = 0;
dl_se->dl_bw = 0;
dl_se->dl_density = 0;
dl_se->dl_boosted = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
dl_se->dl_overrun = 0;
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
if (dl_se->dl_runtime != attr->sched_runtime ||
dl_se->dl_deadline != attr->sched_deadline ||
dl_se->dl_period != attr->sched_period ||
dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
return true;
return false;
}
#ifdef CONFIG_SMP
int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
{
unsigned int dest_cpu;
struct dl_bw *dl_b;
bool overflow;
int cpus, ret;
unsigned long flags;
dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
rcu_read_lock_sched();
dl_b = dl_bw_of(dest_cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(dest_cpu);
overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
if (overflow) {
ret = -EBUSY;
} else {
/*
* We reserve space for this task in the destination
* root_domain, as we can't fail after this point.
* We will free resources in the source root_domain
* later on (see set_cpus_allowed_dl()).
*/
__dl_add(dl_b, p->dl.dl_bw, cpus);
ret = 0;
}
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
return ret;
}
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial)
{
int ret = 1, trial_cpus;
struct dl_bw *cur_dl_b;
unsigned long flags;
rcu_read_lock_sched();
cur_dl_b = dl_bw_of(cpumask_any(cur));
trial_cpus = cpumask_weight(trial);
raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
if (cur_dl_b->bw != -1 &&
cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
ret = 0;
raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
rcu_read_unlock_sched();
return ret;
}
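/*
* Shrinking example for the check above (illustrative numbers): with
* cur_dl_b->bw = 996147 (95% per CPU) and already-admitted deadline tasks
* summing to total_bw = 2500000, a trial mask of 2 CPUs only offers
* 2 * 996147 = 1992294 of bandwidth, so the shrink is rejected (ret = 0);
* a trial mask of 3 or more CPUs would be allowed.
*/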
bool dl_cpu_busy(unsigned int cpu)
{
unsigned long flags;
struct dl_bw *dl_b;
bool overflow;
int cpus;
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(cpu);
overflow = __dl_overflow(dl_b, cpus, 0, 0);
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
return overflow;
}
#endif
#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */