android_kernel_xiaomi_sm8350/drivers/target/target_core_configfs.c
Raghavendra Rao Ananta 5bd75403be Merge remote-tracking branch 'remotes/origin/tmp-f686d9f' into msm-lahaina
* remotes/origin/tmp-f686d9f:
  ANDROID: update abi_gki_aarch64.xml for 5.2-rc6
  Linux 5.2-rc6
  Revert "iommu/vt-d: Fix lock inversion between iommu->lock and device_domain_lock"
  Bluetooth: Fix regression with minimum encryption key size alignment
  tcp: refine memory limit test in tcp_fragment()
  x86/vdso: Prevent segfaults due to hoisted vclock reads
  SUNRPC: Fix a credential refcount leak
  Revert "SUNRPC: Declare RPC timers as TIMER_DEFERRABLE"
  net :sunrpc :clnt :Fix xps refcount imbalance on the error path
  NFS4: Only set creation opendata if O_CREAT
  ANDROID: gki_defconfig: workaround to enable configs
  ANDROID: gki_defconfig: more configs for partners
  ARM: 8867/1: vdso: pass --be8 to linker if necessary
  KVM: nVMX: reorganize initial steps of vmx_set_nested_state
  KVM: PPC: Book3S HV: Invalidate ERAT when flushing guest TLB entries
  habanalabs: use u64_to_user_ptr() for reading user pointers
  nfsd: replace Jeff by Chuck as nfsd co-maintainer
  inet: clear num_timeout reqsk_alloc()
  PCI/P2PDMA: Ignore root complex whitelist when an IOMMU is present
  net: mvpp2: debugfs: Add pmap to fs dump
  ipv6: Default fib6_type to RTN_UNICAST when not set
  net: hns3: Fix inconsistent indenting
  net/af_iucv: always register net_device notifier
  net/af_iucv: build proper skbs for HiperTransport
  net/af_iucv: remove GFP_DMA restriction for HiperTransport
  doc: fix documentation about UIO_MEM_LOGICAL using
  MAINTAINERS / Documentation: Thorsten Scherer is the successor of Gavin Schenk
  docs: fb: Add TER16x32 to the available font names
  MAINTAINERS: fpga: hand off maintainership to Moritz
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 507
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 506
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 505
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 504
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 503
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 502
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 501
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 500
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 499
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 498
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 497
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 496
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 495
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 491
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 490
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 489
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 488
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 487
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 486
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 485
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 484
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 482
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 481
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 480
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 479
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 477
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 475
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 474
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 473
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 472
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 471
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 469
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 468
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 467
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 466
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 465
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 464
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 463
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 462
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 461
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 460
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 459
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 457
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 456
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 455
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 454
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 452
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 451
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 250
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 248
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 247
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 246
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 245
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 244
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 243
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 239
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 238
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 237
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 235
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 234
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 233
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 232
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 231
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 230
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 226
  KVM: arm/arm64: Fix emulated ptimer irq injection
  net: dsa: mv88e6xxx: fix shift of FID bits in mv88e6185_g1_vtu_loadpurge()
  tests: kvm: Check for a kernel warning
  kvm: tests: Sort tests in the Makefile alphabetically
  KVM: x86/mmu: Allocate PAE root array when using SVM's 32-bit NPT
  KVM: x86: Modify struct kvm_nested_state to have explicit fields for data
  fanotify: update connector fsid cache on add mark
  quota: fix a problem about transfer quota
  drm/i915: Don't clobber M/N values during fastset check
  powerpc: enable a 30-bit ZONE_DMA for 32-bit pmac
  ovl: make i_ino consistent with st_ino in more cases
  scsi: qla2xxx: Fix hardlockup in abort command during driver remove
  scsi: ufs: Avoid runtime suspend possibly being blocked forever
  scsi: qedi: update driver version to 8.37.0.20
  scsi: qedi: Check targetname while finding boot target information
  hvsock: fix epollout hang from race condition
  net/udp_gso: Allow TX timestamp with UDP GSO
  net: netem: fix use after free and double free with packet corruption
  net: netem: fix backlog accounting for corrupted GSO frames
  net: lio_core: fix potential sign-extension overflow on large shift
  tipc: pass tunnel dev as NULL to udp_tunnel(6)_xmit_skb
  ip6_tunnel: allow not to count pkts on tstats by passing dev as NULL
  ip_tunnel: allow not to count pkts on tstats by setting skb's dev to NULL
  apparmor: reset pos on failure to unpack for various functions
  apparmor: enforce nullbyte at end of tag string
  apparmor: fix PROFILE_MEDIATES for untrusted input
  RDMA/efa: Handle mmap insertions overflow
  tun: wake up waitqueues after IFF_UP is set
  drm: return -EFAULT if copy_to_user() fails
  net: remove duplicate fetch in sock_getsockopt
  tipc: fix issues with early FAILOVER_MSG from peer
  bnx2x: Check if transceiver implements DDM before access
  xhci: detect USB 3.2 capable host controllers correctly
  usb: xhci: Don't try to recover an endpoint if port is in error state.
  KVM: fix typo in documentation
  drm/panfrost: Make sure a BO is only unmapped when appropriate
  md: fix for divide error in status_resync
  soc: ixp4xx: npe: Fix an IS_ERR() vs NULL check in probe
  arm64/mm: don't initialize pgd_cache twice
  MAINTAINERS: Update my email address
  arm64/sve: <uapi/asm/ptrace.h> should not depend on <uapi/linux/prctl.h>
  ovl: fix typo in MODULE_PARM_DESC
  ovl: fix bogus -Wmaybe-unitialized warning
  ovl: don't fail with disconnected lower NFS
  mmc: core: Prevent processing SDIO IRQs when the card is suspended
  mmc: sdhci: sdhci-pci-o2micro: Correctly set bus width when tuning
  brcmfmac: sdio: Don't tune while the card is off
  mmc: core: Add sdio_retune_hold_now() and sdio_retune_release()
  brcmfmac: sdio: Disable auto-tuning around commands expected to fail
  mmc: core: API to temporarily disable retuning for SDIO CRC errors
  Revert "brcmfmac: disable command decode in sdio_aos"
  ARM: ixp4xx: include irqs.h where needed
  ARM: ixp4xx: mark ixp4xx_irq_setup as __init
  ARM: ixp4xx: don't select SERIAL_OF_PLATFORM
  firmware: trusted_foundations: add ARMv7 dependency
  usb: dwc2: Use generic PHY width in params setup
  RDMA/efa: Fix success return value in case of error
  IB/hfi1: Handle port down properly in pio
  IB/hfi1: Handle wakeup of orphaned QPs for pio
  IB/hfi1: Wakeup QPs orphaned on wait list after flush
  IB/hfi1: Use aborts to trigger RC throttling
  IB/hfi1: Create inline to get extended headers
  IB/hfi1: Silence txreq allocation warnings
  IB/hfi1: Avoid hardlockup with flushlist_lock
  KVM: PPC: Book3S HV: Only write DAWR[X] when handling h_set_dawr in real mode
  KVM: PPC: Book3S HV: Fix r3 corruption in h_set_dabr()
  fs/namespace: fix unprivileged mount propagation
  vfs: fsmount: add missing mntget()
  cifs: fix GlobalMid_Lock bug in cifs_reconnect
  SMB3: retry on STATUS_INSUFFICIENT_RESOURCES instead of failing write
  staging: erofs: add requirements field in superblock
  arm64: ssbd: explicitly depend on <linux/prctl.h>
  block: fix page leak when merging to same page
  block: return from __bio_try_merge_page if merging occured in the same page
  Btrfs: fix failure to persist compression property xattr deletion on fsync
  riscv: remove unused barrier defines
  usb: chipidea: udc: workaround for endpoint conflict issue
  MAINTAINERS: Change QCOM repo location
  mmc: mediatek: fix SDIO IRQ detection issue
  mmc: mediatek: fix SDIO IRQ interrupt handle flow
  mmc: core: complete HS400 before checking status
  riscv: mm: synchronize MMU after pte change
  MAINTAINERS: Update my email address to use @kernel.org
  ANDROID: update abi_gki_aarch64.xml for 5.2-rc5
  riscv: dts: add initial board data for the SiFive HiFive Unleashed
  riscv: dts: add initial support for the SiFive FU540-C000 SoC
  dt-bindings: riscv: convert cpu binding to json-schema
  dt-bindings: riscv: sifive: add YAML documentation for the SiFive FU540
  arch: riscv: add support for building DTB files from DT source data
  drm/i915/gvt: ignore unexpected pvinfo write
  lapb: fixed leak of control-blocks.
  tipc: purge deferredq list for each grp member in tipc_group_delete
  ax25: fix inconsistent lock state in ax25_destroy_timer
  neigh: fix use-after-free read in pneigh_get_next
  tcp: fix compile error if !CONFIG_SYSCTL
  hv_sock: Suppress bogus "may be used uninitialized" warnings
  be2net: Fix number of Rx queues used for flow hashing
  net: handle 802.1P vlan 0 packets properly
  Linux 5.2-rc5
  tcp: enforce tcp_min_snd_mss in tcp_mtu_probing()
  tcp: add tcp_min_snd_mss sysctl
  tcp: tcp_fragment() should apply sane memory limits
  tcp: limit payload size of sacked skbs
  Revert "net: phylink: set the autoneg state in phylink_phy_change"
  bpf: fix nested bpf tracepoints with per-cpu data
  bpf: Fix out of bounds memory access in bpf_sk_storage
  vsock/virtio: set SOCK_DONE on peer shutdown
  net: dsa: rtl8366: Fix up VLAN filtering
  net: phylink: set the autoneg state in phylink_phy_change
  powerpc/32: fix build failure on book3e with KVM
  powerpc/booke: fix fast syscall entry on SMP
  powerpc/32s: fix initial setup of segment registers on secondary CPU
  x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback
  net: add high_order_alloc_disable sysctl/static key
  tcp: add tcp_tx_skb_cache sysctl
  tcp: add tcp_rx_skb_cache sysctl
  sysctl: define proc_do_static_key()
  hv_netvsc: Set probe mode to sync
  net: sched: flower: don't call synchronize_rcu() on mask creation
  net: dsa: fix warning same module names
  sctp: Free cookie before we memdup a new one
  net: dsa: microchip: Don't try to read stats for unused ports
  qmi_wwan: extend permitted QMAP mux_id value range
  qmi_wwan: avoid RCU stalls on device disconnect when in QMAP mode
  qmi_wwan: add network device usage statistics for qmimux devices
  qmi_wwan: add support for QMAP padding in the RX path
  bpf, x64: fix stack layout of JITed bpf code
  Smack: Restore the smackfsdef mount option and add missing prefixes
  bpf, devmap: Add missing RCU read lock on flush
  bpf, devmap: Add missing bulk queue free
  bpf, devmap: Fix premature entry free on destroying map
  ftrace: Fix NULL pointer dereference in free_ftrace_func_mapper()
  module: Fix livepatch/ftrace module text permissions race
  tracing/uprobe: Fix obsolete comment on trace_uprobe_create()
  tracing/uprobe: Fix NULL pointer dereference in trace_uprobe_create()
  tracing: Make two symbols static
  tracing: avoid build warning with HAVE_NOP_MCOUNT
  tracing: Fix out-of-range read in trace_stack_print()
  gfs2: Fix rounding error in gfs2_iomap_page_prepare
  net: phylink: further mac_config documentation improvements
  nfc: Ensure presence of required attributes in the deactivate_target handler
  btrfs: start readahead also in seed devices
  x86/kasan: Fix boot with 5-level paging and KASAN
  cfg80211: report measurement start TSF correctly
  cfg80211: fix memory leak of wiphy device name
  cfg80211: util: fix bit count off by one
  mac80211: do not start any work during reconfigure flow
  cfg80211: use BIT_ULL in cfg80211_parse_mbssid_data()
  mac80211: only warn once on chanctx_conf being NULL
  mac80211: drop robust management frames from unknown TA
  gpu: ipu-v3: image-convert: Fix image downsize coefficients
  gpu: ipu-v3: image-convert: Fix input bytesperline for packed formats
  gpu: ipu-v3: image-convert: Fix input bytesperline width/height align
  thunderbolt: Implement CIO reset correctly for Titan Ridge
  ARM: davinci: da8xx: specify dma_coherent_mask for lcdc
  ARM: davinci: da850-evm: call regulator_has_full_constraints()
  timekeeping: Repair ktime_get_coarse*() granularity
  Revert "ALSA: hda/realtek - Improve the headset mic for Acer Aspire laptops"
  ANDROID: update abi_gki_aarch64.xml
  mm/devm_memremap_pages: fix final page put race
  PCI/P2PDMA: track pgmap references per resource, not globally
  lib/genalloc: introduce chunk owners
  PCI/P2PDMA: fix the gen_pool_add_virt() failure path
  mm/devm_memremap_pages: introduce devm_memunmap_pages
  drivers/base/devres: introduce devm_release_action()
  mm/vmscan.c: fix trying to reclaim unevictable LRU page
  coredump: fix race condition between collapse_huge_page() and core dumping
  mm/mlock.c: change count_mm_mlocked_page_nr return type
  mm: mmu_gather: remove __tlb_reset_range() for force flush
  fs/ocfs2: fix race in ocfs2_dentry_attach_lock()
  mm/vmscan.c: fix recent_rotated history
  mm/mlock.c: mlockall error for flag MCL_ONFAULT
  scripts/decode_stacktrace.sh: prefix addr2line with $CROSS_COMPILE
  mm/list_lru.c: fix memory leak in __memcg_init_list_lru_node
  mm: memcontrol: don't batch updates of local VM stats and events
  PCI: PM: Skip devices in D0 for suspend-to-idle
  ANDROID: Removed extraneous configs from gki
  powerpc/bpf: use unsigned division instruction for 64-bit operations
  bpf: fix div64 overflow tests to properly detect errors
  bpf: sync BPF_FIB_LOOKUP flag changes with BPF uapi
  bpf: simplify definition of BPF_FIB_LOOKUP related flags
  cifs: add spinlock for the openFileList to cifsInodeInfo
  cifs: fix panic in smb2_reconnect
  x86/fpu: Don't use current->mm to check for a kthread
  KVM: nVMX: use correct clean fields when copying from eVMCS
  vfio-ccw: Destroy kmem cache region on module exit
  block/ps3vram: Use %llu to format sector_t after LBDAF removal
  libata: Extend quirks for the ST1000LM024 drives with NOLPM quirk
  bcache: only set BCACHE_DEV_WB_RUNNING when cached device attached
  bcache: fix stack corruption by PRECEDING_KEY()
  arm64/sve: Fix missing SVE/FPSIMD endianness conversions
  blk-mq: remove WARN_ON(!q->elevator) from blk_mq_sched_free_requests
  blkio-controller.txt: Remove references to CFQ
  block/switching-sched.txt: Update to blk-mq schedulers
  null_blk: remove duplicate check for report zone
  blk-mq: no need to check return value of debugfs_create functions
  io_uring: fix memory leak of UNIX domain socket inode
  block: force select mq-deadline for zoned block devices
  binder: fix possible UAF when freeing buffer
  drm/amdgpu: return 0 by default in amdgpu_pm_load_smu_firmware
  drm/amdgpu: Fix bounds checking in amdgpu_ras_is_supported()
  ANDROID: x86 gki_defconfig: enable DMA_CMA
  ANDROID: Fixed x86 regression
  ANDROID: gki_defconfig: enable DMA_CMA
  Input: synaptics - enable SMBus on ThinkPad E480 and E580
  net: mvpp2: prs: Use the correct helpers when removing all VID filters
  net: mvpp2: prs: Fix parser range for VID filtering
  mlxsw: spectrum: Disallow prio-tagged packets when PVID is removed
  mlxsw: spectrum_buffers: Reduce pool size on Spectrum-2
  selftests: tc_flower: Add TOS matching test
  mlxsw: spectrum_flower: Fix TOS matching
  selftests: mlxsw: Test nexthop offload indication
  mlxsw: spectrum_router: Refresh nexthop neighbour when it becomes dead
  mlxsw: spectrum: Use different seeds for ECMP and LAG hash
  net: tls, correctly account for copied bytes with multiple sk_msgs
  vrf: Increment Icmp6InMsgs on the original netdev
  cpuset: restore sanity to cpuset_cpus_allowed_fallback()
  net: ethtool: Allow matching on vlan DEI bit
  linux-next: DOC: RDS: Fix a typo in rds.txt
  x86/kgdb: Return 0 from kgdb_arch_set_breakpoint()
  mpls: fix af_mpls dependencies for real
  selinux: fix a missing-check bug in selinux_sb_eat_lsm_opts()
  selinux: fix a missing-check bug in selinux_add_mnt_opt( )
  arm64: tlbflush: Ensure start/end of address range are aligned to stride
  usb: typec: Make sure an alt mode exist before getting its partner
  KVM: arm/arm64: vgic: Fix kvm_device leak in vgic_its_destroy
  KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST
  KVM: arm64: Implement vq_present() as a macro
  xdp: check device pointer before clearing
  bpf: net: Set sk_bpf_storage back to NULL for cloned sk
  Btrfs: fix race between block group removal and block group allocation
  clocksource/drivers/arm_arch_timer: Don't trace count reader functions
  i2c: pca-platform: Fix GPIO lookup code
  thunderbolt: Make sure device runtime resume completes before taking domain lock
  drm: add fallback override/firmware EDID modes workaround
  i2c: acorn: fix i2c warning
  arm64: Don't unconditionally add -Wno-psabi to KBUILD_CFLAGS
  drm/edid: abstract override/firmware EDID retrieval
  platform/mellanox: mlxreg-hotplug: Add devm_free_irq call to remove flow
  platform/x86: mlx-platform: Fix parent device in i2c-mux-reg device registration
  platform/x86: intel-vbtn: Report switch events when event wakes device
  platform/x86: asus-wmi: Only Tell EC the OS will handle display hotkeys from asus_nb_wmi
  ARM: mvebu_v7_defconfig: fix Ethernet on Clearfog
  x86/resctrl: Prevent NULL pointer dereference when local MBM is disabled
  x86/resctrl: Don't stop walking closids when a locksetup group is found
  iommu/arm-smmu: Avoid constant zero in TLBI writes
  drm/i915/perf: fix whitelist on Gen10+
  drm/i915/sdvo: Implement proper HDMI audio support for SDVO
  drm/i915: Fix per-pixel alpha with CCS
  drm/i915/dmc: protect against reading random memory
  drm/i915/dsi: Use a fuzzy check for burst mode clock check
  Input: imx_keypad - make sure keyboard can always wake up system
  selinux: log raw contexts as untrusted strings
  ptrace: restore smp_rmb() in __ptrace_may_access()
  IB/hfi1: Correct tid qp rcd to match verbs context
  IB/hfi1: Close PSM sdma_progress sleep window
  IB/hfi1: Validate fault injection opcode user input
  geneve: Don't assume linear buffers in error handler
  vxlan: Don't assume linear buffers in error handler
  net: openvswitch: do not free vport if register_netdevice() is failed.
  net: correct udp zerocopy refcnt also when zerocopy only on append
  drm/amdgpu/{uvd,vcn}: fetch ring's read_ptr after alloc
  ovl: fix wrong flags check in FS_IOC_FS[SG]ETXATTR ioctls
  riscv: Fix udelay in RV32.
  drm/vmwgfx: fix a warning due to missing dma_parms
  riscv: export pm_power_off again
  drm/vmwgfx: Honor the sg list segment size limitation
  RISC-V: defconfig: enable clocks, serial console
  drm/vmwgfx: Use the backdoor port if the HB port is not available
  bpf: lpm_trie: check left child of last leftmost node for NULL
  Revert "fuse: require /dev/fuse reads to have enough buffer capacity"
  ALSA: ice1712: Check correct return value to snd_i2c_sendbytes (EWS/DMX 6Fire)
  ALSA: oxfw: allow PCM capture for Stanton SCS.1m
  ALSA: firewire-motu: fix destruction of data for isochronous resources
  s390/ctl_reg: mark __ctl_set_bit and __ctl_clear_bit as __always_inline
  s390/boot: disable address-of-packed-member warning
  ANDROID: update gki aarch64 ABI representation
  cgroup: Fix css_task_iter_advance_css_set() cset skip condition
  drm/panfrost: Require the simple_ondemand governor
  drm/panfrost: make devfreq optional again
  drm/gem_shmem: Use a writecombine mapping for ->vaddr
  mmc: sdhi: disallow HS400 for M3-W ES1.2, RZ/G2M, and V3H
  ASoC: Intel: sst: fix kmalloc call with wrong flags
  ASoC: core: Fix deadlock in snd_soc_instantiate_card()
  cgroup/bfq: revert bfq.weight symlink change
  ARM: dts: am335x phytec boards: Fix cd-gpios active level
  ARM: dts: dra72x: Disable usb4_tm target module
  nfp: ensure skb network header is set for packet redirect
  tcp: fix undo spurious SYNACK in passive Fast Open
  mpls: fix af_mpls dependencies
  ibmvnic: Fix unchecked return codes of memory allocations
  ibmvnic: Refresh device multicast list after reset
  ibmvnic: Do not close unopened driver during reset
  mpls: fix warning with multi-label encap
  net: phy: rename Asix Electronics PHY driver
  ipv6: flowlabel: fl6_sock_lookup() must use atomic_inc_not_zero
  net: ipv4: fib_semantics: fix uninitialized variable
  Input: iqs5xx - get axis info before calling input_mt_init_slots()
  Linux 5.2-rc4
  drm: panel-orientation-quirks: Add quirk for GPD MicroPC
  drm: panel-orientation-quirks: Add quirk for GPD pocket2
  counter/ftm-quaddec: Add missing dependencies in Kconfig
  staging: iio: adt7316: Fix build errors when GPIOLIB is not set
  x86/fpu: Update kernel's FPU state before using for the fsave header
  MAINTAINERS: Karthikeyan Ramasubramanian is MIA
  i2c: xiic: Add max_read_len quirk
  ANDROID: update ABI representation
  gpio: pca953x: hack to fix 24 bit gpio expanders
  net/mlx5e: Support tagged tunnel over bond
  net/mlx5e: Avoid detaching non-existing netdev under switchdev mode
  net/mlx5e: Fix source port matching in fdb peer flow rule
  net/mlx5e: Replace reciprocal_scale in TX select queue function
  net/mlx5e: Add ndo_set_feature for uplink representor
  net/mlx5: Avoid reloading already removed devices
  net/mlx5: Update pci error handler entries and command translation
  RAS/CEC: Convert the timer callback to a workqueue
  RAS/CEC: Fix binary search function
  x86/mm/KASLR: Compute the size of the vmemmap section properly
  can: purge socket error queue on sock destruct
  can: flexcan: Remove unneeded registration message
  can: af_can: Fix error path of can_init()
  can: m_can: implement errata "Needless activation of MRAF irq"
  can: mcp251x: add support for mcp25625
  dt-bindings: can: mcp251x: add mcp25625 support
  can: xilinx_can: use correct bittiming_const for CAN FD core
  can: flexcan: fix timeout when set small bitrate
  can: usb: Kconfig: Remove duplicate menu entry
  lockref: Limit number of cmpxchg loop retries
  uaccess: add noop untagged_addr definition
  x86/insn-eval: Fix use-after-free access to LDT entry
  kbuild: use more portable 'command -v' for cc-cross-prefix
  s390/unwind: correct stack switching during unwind
  scsi: hpsa: correct ioaccel2 chaining
  btrfs: Always trim all unallocated space in btrfs_trim_free_extents
  netfilter: ipv6: nf_defrag: accept duplicate fragments again
  powerpc/32s: fix booting with CONFIG_PPC_EARLY_DEBUG_BOOTX
  drm/meson: fix G12A primary plane disabling
  drm/meson: fix primary plane disabling
  drm/meson: fix G12A HDMI PLL settings for 4K60 1000/1001 variations
  block, bfq: add weight symlink to the bfq.weight cgroup parameter
  cgroup: let a symlink too be created with a cftype file
  powerpc/64s: __find_linux_pte() synchronization vs pmdp_invalidate()
  powerpc/64s: Fix THP PMD collapse serialisation
  powerpc: Fix kexec failure on book3s/32
  drm/nouveau/secboot/gp10[2467]: support newer FW to fix SEC2 failures on some boards
  drm/nouveau/secboot: enable loading of versioned LS PMU/SEC2 ACR msgqueue FW
  drm/nouveau/secboot: split out FW version-specific LS function pointers
  drm/nouveau/secboot: pass max supported FW version to LS load funcs
  drm/nouveau/core: support versioned firmware loading
  drm/nouveau/core: pass subdev into nvkm_firmware_get, rather than device
  block: free sched's request pool in blk_cleanup_queue
  bpf: expand section tests for test_section_names
  bpf: more msg_name rewrite tests to test_sock_addr
  bpf, bpftool: enable recvmsg attach types
  bpf, libbpf: enable recvmsg attach types
  bpf: sync tooling uapi header
  bpf: fix unconnected udp hooks
  vfio/mdev: Synchronize device create/remove with parent removal
  vfio/mdev: Avoid creating sysfs remove file on stale device removal
  pktgen: do not sleep with the thread lock held.
  net: mvpp2: Use strscpy to handle stat strings
  net: rds: fix memory leak in rds_ib_flush_mr_pool
  ipv6: fix EFAULT on sendto with icmpv6 and hdrincl
  ipv6: use READ_ONCE() for inet->hdrincl as in ipv4
  soundwire: intel: set dai min and max channels correctly
  soundwire: stream: fix bad unlock balance
  x86/fpu: Use fault_in_pages_writeable() for pre-faulting
  nvme-rdma: use dynamic dma mapping per command
  nvme: Fix u32 overflow in the number of namespace list calculation
  vfio/mdev: Improve the create/remove sequence
  SoC: rt274: Fix internal jack assignment in set_jack callback
  ALSA: hdac: fix memory release for SST and SOF drivers
  ASoC: SOF: Intel: hda: use the defined ppcap functions
  ASoC: core: move DAI pre-links initiation to snd_soc_instantiate_card
  ASoC: Intel: cht_bsw_rt5672: fix kernel oops with platform_name override
  ASoC: Intel: cht_bsw_nau8824: fix kernel oops with platform_name override
  ASoC: Intel: bytcht_es8316: fix kernel oops with platform_name override
  ASoC: Intel: cht_bsw_max98090: fix kernel oops with platform_name override
  Revert "gfs2: Replace gl_revokes with a GLF flag"
  arm64: Silence gcc warnings about arch ABI drift
  parisc: Fix crash due alternative coding for NP iopdir_fdc bit
  parisc: Use lpa instruction to load physical addresses in driver code
  parisc: configs: Remove useless UEVENT_HELPER_PATH
  parisc: Use implicit space register selection for loading the coherence index of I/O pdirs
  usb: gadget: udc: lpc32xx: fix return value check in lpc32xx_udc_probe()
  usb: gadget: dwc2: fix zlp handling
  usb: dwc2: Set actual frame number for completed ISOC transfer for none DDMA
  usb: gadget: udc: lpc32xx: allocate descriptor with GFP_ATOMIC
  usb: gadget: fusb300_udc: Fix memory leak of fusb300->ep[i]
  usb: phy: mxs: Disable external charger detect in mxs_phy_hw_init()
  usb: dwc2: Fix DMA cache alignment issues
  usb: dwc2: host: Fix wMaxPacketSize handling (fix webcam regression)
  ARM64: trivial: s/TIF_SECOMP/TIF_SECCOMP/ comment typo fix
  drm/komeda: Potential error pointer dereference
  drm/komeda: remove set but not used variable 'kcrtc'
  x86/CPU: Add more Icelake model numbers
  hwmon: (pmbus/core) Treat parameters as paged if on multiple pages
  hwmon: (pmbus/core) mutex_lock write in pmbus_set_samples
  hwmon: (core) add thermal sensors only if dev->of_node is present
  Revert "fib_rules: return 0 directly if an exactly same rule exists when NLM_F_EXCL not supplied"
  net: aquantia: fix wol configuration not applied sometimes
  ethtool: fix potential userspace buffer overflow
  Fix memory leak in sctp_process_init
  net: rds: fix memory leak when unload rds_rdma
  ipv6: fix the check before getting the cookie in rt6_get_cookie
  ipv4: not do cache for local delivery if bc_forwarding is enabled
  selftests: vm: Fix test build failure when built by itself
  tools: bpftool: Fix JSON output when lookup fails
  mmc: also set max_segment_size in the device
  mtip32xx: also set max_segment_size in the device
  rsxx: don't call dma_set_max_seg_size
  nvme-pci: don't limit DMA segement size
  s390/qeth: handle error when updating TX queue count
  s390/qeth: fix VLAN attribute in bridge_hostnotify udev event
  s390/qeth: check dst entry before use
  s390/qeth: handle limited IPv4 broadcast in L3 TX path
  ceph: fix error handling in ceph_get_caps()
  ceph: avoid iput_final() while holding mutex or in dispatch thread
  ceph: single workqueue for inode related works
  cgroup: css_task_iter_skip()'d iterators must be advanced before accessed
  drm/amd/amdgpu: add RLC firmware to support raven1 refresh
  drm/amd/powerplay: add set_power_profile_mode for raven1_refresh
  drm/amdgpu: fix ring test failure issue during s3 in vce 3.0 (V2)
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 450
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 449
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 448
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 446
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 445
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 444
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 443
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 442
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 441
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 440
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 438
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 437
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 436
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 435
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 434
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 433
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 432
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 431
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 430
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 429
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 428
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 426
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 424
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 423
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 422
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 421
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 420
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 419
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 418
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 417
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 416
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 414
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 412
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 411
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 410
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 409
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 408
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 407
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 406
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 405
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 404
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 403
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 402
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 401
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 400
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 399
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 398
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 397
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 396
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 395
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 394
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 393
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 392
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 391
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 390
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 389
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 388
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 387
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 380
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 378
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 377
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 376
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 375
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 373
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 372
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 371
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 370
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 367
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 365
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 364
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 363
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 362
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 354
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 353
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 352
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 351
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 350
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 349
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 348
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 347
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 346
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 345
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 344
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 343
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 342
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 341
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 340
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 339
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 338
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 336
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 335
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 334
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 333
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 332
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 330
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 328
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 326
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 325
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 324
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 323
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 322
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 321
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 320
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 316
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 315
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 314
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 313
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 312
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 311
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 310
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 309
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 308
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 307
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 305
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 301
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 300
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 299
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 297
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 296
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 295
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 294
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 292
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 291
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 290
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 289
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 288
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 287
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 286
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 285
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 284
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 283
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 282
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 281
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 280
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 278
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 277
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 276
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 275
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 274
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 273
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 272
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 271
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 270
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 269
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 268
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 267
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 266
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 265
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 264
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 263
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 262
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 260
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 258
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 257
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 256
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 254
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 253
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 252
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 251
  lib/test_stackinit: Handle Clang auto-initialization pattern
  block: Drop unlikely before IS_ERR(_OR_NULL)
  xen/swiotlb: don't initialize swiotlb twice on arm64
  s390/mm: fix address space detection in exception handling
  HID: logitech-dj: Fix 064d:c52f receiver support
  Revert "HID: core: Call request_module before doing device_add"
  Revert "HID: core: Do not call request_module() in async context"
  Revert "HID: Increase maximum report size allowed by hid_field_extract()"
  tests: fix pidfd-test compilation
  signal: improve comments
  samples: fix pidfd-metadata compilation
  arm64: arch_timer: mark functions as __always_inline
  arm64: smp: Moved cpu_logical_map[] to smp.h
  arm64: cpufeature: Fix missing ZFR0 in __read_sysreg_by_encoding()
  selftests/bpf: move test_lirc_mode2_user to TEST_GEN_PROGS_EXTENDED
  USB: Fix chipmunk-like voice when using Logitech C270 for recording audio.
  USB: usb-storage: Add new ID to ums-realtek
  udmabuf: actually unmap the scatterlist
  net: fix indirect calls helpers for ptype list hooks.
  net: ipvlan: Fix ipvlan device tso disabled while NETIF_F_IP_CSUM is set
  scsi: smartpqi: unlock on error in pqi_submit_raid_request_synchronous()
  scsi: ufs: Check that space was properly alloced in copy_query_response
  udp: only choose unbound UDP socket for multicast when not in a VRF
  net/tls: replace the sleeping lock around RX resync with a bit lock
  Revert "net/tls: avoid NULL-deref on resync during device removal"
  block: aoe: no need to check return value of debugfs_create functions
  net: dsa: sja1105: Fix link speed not working at 100 Mbps and below
  net: phylink: avoid reducing support mask
  scripts/checkstack.pl: Fix arm64 wrong or unknown architecture
  kbuild: tar-pkg: enable communication with jobserver
  kconfig: tests: fix recursive inclusion unit test
  kbuild: teach kselftest-merge to find nested config files
  nvmet: fix data_len to 0 for bdev-backed write_zeroes
  MAINTAINERS: Hand over skd maintainership
  ASoC: sun4i-i2s: Add offset to RX channel select
  ASoC: sun4i-i2s: Fix sun8i tx channel offset mask
  ASoC: max98090: remove 24-bit format support if RJ is 0
  ASoC: da7219: Fix build error without CONFIG_I2C
  ASoC: SOF: Intel: hda: Fix COMPILE_TEST build error
  drm/arm/hdlcd: Allow a bit of clock tolerance
  drm/arm/hdlcd: Actually validate CRTC modes
  drm/arm/mali-dp: Add a loop around the second set CVAL and try 5 times
  drm/komeda: fixing of DMA mapping sg segment warning
  netfilter: ipv6: nf_defrag: fix leakage of unqueued fragments
  habanalabs: Read upper bits of trace buffer from RWPHI
  arm64: arch_k3: Fix kconfig dependency warning
  drm: don't block fb changes for async plane updates
  drm/vc4: fix fb references in async update
  drm/msm: fix fb references in async update
  drm/amd: fix fb references in async update
  drm/rockchip: fix fb references in async update
  xen-blkfront: switch kcalloc to kvcalloc for large array allocation
  drm/mediatek: call mtk_dsi_stop() after mtk_drm_crtc_atomic_disable()
  drm/mediatek: clear num_pipes when unbind driver
  drm/mediatek: call drm_atomic_helper_shutdown() when unbinding driver
  drm/mediatek: unbind components in mtk_drm_unbind()
  drm/mediatek: fix unbind functions
  net: sfp: read eeprom in maximum 16 byte increments
  selftests: set sysctl bc_forwarding properly in router_broadcast.sh
  ANDROID: update gki aarch64 ABI representation
  net: ethernet: mediatek: Use NET_IP_ALIGN to judge if HW RX_2BYTE_OFFSET is enabled
  net: ethernet: mediatek: Use hw_feature to judge if HWLRO is supported
  net: ethernet: ti: cpsw_ethtool: fix ethtool ring param set
  ANDROID: gki_defconfig: Enable CMA, SLAB_FREELIST (RANDOM and HARDENED) on x86
  bpf: udp: Avoid calling reuseport's bpf_prog from udp_gro
  bpf: udp: ipv6: Avoid running reuseport's bpf_prog from __udp6_lib_err
  rcu: locking and unlocking need to always be at least barriers
  ANDROID: gki_defconfig: enable SLAB_FREELIST_RANDOM, SLAB_FREELIST_HARDENED
  ANDROID: gki_defconfig: enable CMA and increase CMA_AREAS
  ASoC: SOF: fix DSP oops definitions in FW ABI
  ASoC: hda: fix unbalanced codec dev refcount for HDA_DEV_ASOC
  ASoC: SOF: ipc: replace fw ready bitfield with explicit bit ordering
  ASoC: SOF: bump to ABI 3.6
  ASoC: SOF: soundwire: add initial soundwire support
  ASoC: SOF: uapi: mirror firmware changes
  ASoC: Intel: Baytrail: add quirk for Aegex 10 (RU2) tablet
  xfs: inode btree scrubber should calculate im_boffset correctly
  mmc: sdhci_am654: Fix SLOTTYPE write
  usb: typec: ucsi: ccg: fix memory leak in do_flash
  ANDROID: update gki aarch64 ABI representation
  habanalabs: Fix virtual address access via debugfs for 2MB pages
  drm/komeda: Constify the usage of komeda_component/pipeline/dev_funcs
  x86/power: Fix 'nosmt' vs hibernation triple fault during resume
  mm/vmalloc: Avoid rare case of flushing TLB with weird arguments
  mm/vmalloc: Fix calculation of direct map addr range
  PM: sleep: Add kerneldoc comments to some functions
  drm/i915/gvt: save RING_HEAD into vreg when vgpu switched out
  sparc: perf: fix updated event period in response to PERF_EVENT_IOC_PERIOD
  mdesc: fix a missing-check bug in get_vdev_port_node_info()
  drm/i915/gvt: add F_CMD_ACCESS flag for wa regs
  sparc64: Fix regression in non-hypervisor TLB flush xcall
  packet: unconditionally free po->rollover
  Update my email address
  net: hns: Fix loopback test failed at copper ports
  Linux 5.2-rc3
  net: dsa: mv88e6xxx: avoid error message on remove from VLAN 0
  mm, compaction: make sure we isolate a valid PFN
  include/linux/generic-radix-tree.h: fix kerneldoc comment
  kernel/signal.c: trace_signal_deliver when signal_group_exit
  drivers/iommu/intel-iommu.c: fix variable 'iommu' set but not used
  spdxcheck.py: fix directory structures
  kasan: initialize tag to 0xff in __kasan_kmalloc
  z3fold: fix scheduling while atomic
  scripts/gdb: fix invocation when CONFIG_COMMON_CLK is not set
  mm/gup: continue VM_FAULT_RETRY processing even for pre-faults
  ocfs2: fix error path kobject memory leak
  memcg: make it work on sparse non-0-node systems
  mm, memcg: consider subtrees in memory.events
  prctl_set_mm: downgrade mmap_sem to read lock
  prctl_set_mm: refactor checks from validate_prctl_map
  kernel/fork.c: make max_threads symbol static
  arch/arm/boot/compressed/decompress.c: fix build error due to lz4 changes
  arch/parisc/configs/c8000_defconfig: remove obsoleted CONFIG_DEBUG_SLAB_LEAK
  mm/vmalloc.c: fix typo in comment
  lib/sort.c: fix kernel-doc notation warnings
  mm: fix Documentation/vm/hmm.rst Sphinx warnings
  treewide: fix typos of SPDX-License-Identifier
  crypto: ux500 - fix license comment syntax error
  MAINTAINERS: add I2C DT bindings to ARM platforms
  MAINTAINERS: add DT bindings to i2c drivers
  mwifiex: Fix heap overflow in mwifiex_uap_parse_tail_ies()
  iwlwifi: mvm: change TLC config cmd sent by rs to be async
  iwlwifi: Fix double-free problems in iwl_req_fw_callback()
  iwlwifi: fix AX201 killer sku loading firmware issue
  iwlwifi: print fseq info upon fw assert
  iwlwifi: clear persistence bit according to device family
  iwlwifi: fix load in rfkill flow for unified firmware
  iwlwifi: mvm: remove d3_sram debugfs file
  bpf, riscv: clear high 32 bits for ALU32 add/sub/neg/lsh/rsh/arsh
  libbpf: Return btf_fd for load_sk_storage_btf
  HID: a4tech: fix horizontal scrolling
  HID: hyperv: Add a module description line
  net: dsa: sja1105: Don't store frame type in skb->cb
  block: print offending values when cloned rq limits are exceeded
  blk-mq: Document the blk_mq_hw_queue_to_node() arguments
  blk-mq: Fix spelling in a source code comment
  block: Fix bsg_setup_queue() kernel-doc header
  block: Fix rq_qos_wait() kernel-doc header
  block: Fix blk_mq_*_map_queues() kernel-doc headers
  block: Fix throtl_pending_timer_fn() kernel-doc header
  block: Convert blk_invalidate_devt() header into a non-kernel-doc header
  block/partitions/ldm: Convert a kernel-doc header into a non-kernel-doc header
  leds: avoid flush_work in atomic context
  cgroup: Include dying leaders with live threads in PROCS iterations
  cgroup: Implement css_task_iter_skip()
  cgroup: Call cgroup_release() before __exit_signal()
  netfilter: nf_tables: fix module autoload with inet family
  Revert "lockd: Show pid of lockd for remote locks"
  ALSA: hda/realtek - Update headset mode for ALC256
  fs/adfs: fix filename fixup handling for "/" and "//" names
  fs/adfs: move append_filetype_suffix() into adfs_object_fixup()
  fs/adfs: remove truncated filename hashing
  fs/adfs: factor out filename fixup
  fs/adfs: factor out object fixups
  fs/adfs: factor out filename case lowering
  fs/adfs: factor out filename comparison
  ovl: doc: add non-standard corner cases
  pstore/ram: Run without kernel crash dump region
  MAINTAINERS: add Vasily Gorbik and Christian Borntraeger for s390
  MAINTAINERS: Farewell Martin Schwidefsky
  pstore: Set tfm to NULL on free_buf_for_compression
  nds32: add new emulations for floating point instruction
  nds32: Avoid IEX status being incorrectly modified
  math-emu: Use statement expressions to fix Wshift-count-overflow warning
  net: correct zerocopy refcnt with udp MSG_MORE
  ethtool: Check for vlan etype or vlan tci when parsing flow_rule
  net: don't clear sock->sk early to avoid trouble in strparser
  net-gro: fix use-after-free read in napi_gro_frags()
  net: dsa: tag_8021q: Create a stable binary format
  net: dsa: tag_8021q: Change order of rx_vid setup
  net: mvpp2: fix bad MVPP2_TXQ_SCHED_TOKEN_CNTR_REG queue value
  docs cgroups: add another example size for hugetlb
  NFSv4.1: Fix bug only first CB_NOTIFY_LOCK is handled
  NFSv4.1: Again fix a race where CB_NOTIFY_LOCK fails to wake a waiter
  ipv4: tcp_input: fix stack out of bounds when parsing TCP options.
  mlxsw: spectrum: Prevent force of 56G
  mlxsw: spectrum_acl: Avoid warning after identical rules insertion
  SUNRPC: Fix a use after free when a server rejects the RPCSEC_GSS credential
  net: dsa: mv88e6xxx: fix handling of upper half of STATS_TYPE_PORT
  SUNRPC fix regression in umount of a secure mount
  r8169: fix MAC address being lost in PCI D3
  treewide: Add SPDX license identifier - Kbuild
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 225
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 224
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 223
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 222
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 221
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 220
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 218
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 217
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 216
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 215
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 214
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 213
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 211
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 210
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 209
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 207
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 206
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 203
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 201
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 200
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 199
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 198
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 197
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 195
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 194
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 193
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 191
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 190
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 188
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 185
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 183
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 182
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 180
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 179
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 178
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 177
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 176
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 175
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 174
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 173
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 172
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 171
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 170
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 167
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 166
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 165
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 164
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 162
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 161
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 160
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 159
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 158
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 157
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 156
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 155
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 154
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 153
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 152
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 151
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 150
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 149
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 148
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 147
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 145
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 144
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 143
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 142
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 140
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 139
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 138
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 137
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 136
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 135
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 133
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 132
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 131
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 130
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 129
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 128
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 127
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 126
  net: core: support XDP generic on stacked devices.
  netvsc: unshare skb in VF rx handler
  udp: Avoid post-GRO UDP checksum recalculation
  nvme-tcp: fix queue mapping when queue count is limited
  nvme-rdma: fix queue mapping when queue count is limited
  fpga: zynqmp-fpga: Correctly handle error pointer
  selftests: vm: install test_vmalloc.sh for run_vmtests
  userfaultfd: selftest: fix compiler warning
  kselftest/cgroup: fix incorrect test_core skip
  kselftest/cgroup: fix unexpected testing failure on test_core
  kselftest/cgroup: fix unexpected testing failure on test_memcontrol
  xtensa: Fix section mismatch between memblock_reserve and mem_reserve
  signal/ptrace: Don't leak uninitialized kernel memory with PTRACE_PEEK_SIGINFO
  mwifiex: Abort at too short BSS descriptor element
  mwifiex: Fix possible buffer overflows at parsing bss descriptor
  drm/i915/gvt: Assign NULL to the pointer after memory free.
  drm/i915/gvt: Check if cur_pt_type is valid
  x86: intel_epb: Do not build when CONFIG_PM is unset
  crypto: hmac - fix memory leak in hmac_init_tfm()
  crypto: jitterentropy - change back to module_init()
  ARM: dts: Drop bogus CLKSEL for timer12 on dra7
  KVM: PPC: Book3S HV: Restore SPRG3 in kvmhv_p9_guest_entry()
  KVM: PPC: Book3S HV: Fix lockdep warning when entering guest on POWER9
  KVM: PPC: Book3S HV: XIVE: Fix page offset when clearing ESB pages
  KVM: PPC: Book3S HV: XIVE: Take the srcu read lock when accessing memslots
  KVM: PPC: Book3S HV: XIVE: Do not clear IRQ data of passthrough interrupts
  KVM: PPC: Book3S HV: XIVE: Introduce a new mutex for the XIVE device
  drm/i915/gvt: Fix cmd length of VEB_DI_IECP
  drm/i915/gvt: refine ggtt range validation
  drm/i915/gvt: Fix vGPU CSFE_CHICKEN1_REG mmio handler
  drm/i915/gvt: Fix GFX_MODE handling
  drm/i915/gvt: Update force-to-nonpriv register whitelist
  drm/i915/gvt: Initialize intel_gvt_gtt_entry in stack
  ima: show rules with IMA_INMASK correctly
  evm: check hash algorithm passed to init_desc()
  scsi: libsas: delete sas port if expander discover failed
  scsi: libsas: only clear phy->in_shutdown after shutdown event done
  scsi: scsi_dh_alua: Fix possible null-ptr-deref
  scsi: smartpqi: properly set both the DMA mask and the coherent DMA mask
  scsi: zfcp: fix to prevent port_remove with pure auto scan LUNs (only sdevs)
  scsi: zfcp: fix missing zfcp_port reference put on -EBUSY from port_remove
  scsi: libcxgbi: add a check for NULL pointer in cxgbi_check_route()
  net: phy: dp83867: Set up RGMII TX delay
  net: phy: dp83867: do not call config_init twice
  net: phy: dp83867: increase SGMII autoneg timer duration
  net: phy: dp83867: fix speed 10 in sgmii mode
  net: phy: marvell10g: report if the PHY fails to boot firmware
  net: phylink: ensure consistent phy interface mode
  cgroup: Use css_tryget() instead of css_tryget_online() in task_get_css()
  blk-mq: Fix memory leak in error handling
  usbip: usbip_host: fix stub_dev lock context imbalance regression
  net: sh_eth: fix mdio access in sh_eth_close() for R-Car Gen2 and RZ/A1 SoCs
  MIPS: uprobes: remove set but not used variable 'epc'
  s390/crypto: fix possible sleep during spinlock acquired
  MIPS: pistachio: Build uImage.gz by default
  MIPS: Make virt_addr_valid() return bool
  MIPS: Bounds check virt_addr_valid
  CIFS: cifs_read_allocate_pages: don't iterate through whole page array on ENOMEM
  RDMA/efa: Remove MAYEXEC flag check from mmap flow
  mlx5: avoid 64-bit division
  IB/hfi1: Validate page aligned for a given virtual address
  IB/{qib, hfi1, rdmavt}: Correct ibv_devinfo max_mr value
  IB/hfi1: Insure freeze_work work_struct is canceled on shutdown
  IB/rdmavt: Fix alloc_qpn() WARN_ON()
  ASoC: sun4i-codec: fix first delay on Speaker
  drm/amdgpu: reserve stollen vram for raven series
  media: venus: hfi_parser: fix a regression in parser
  selftests: bpf: fix compiler warning in flow_dissector test
  arm64: use the correct function type for __arm64_sys_ni_syscall
  arm64: use the correct function type in SYSCALL_DEFINE0
  arm64: fix syscall_fn_t type
  block: don't protect generic_make_request_checks with blk_queue_enter
  block: move blk_exit_queue into __blk_release_queue
  selftests: bpf: complete sub-register zero extension checks
  selftests: bpf: move sub-register zero extension checks into subreg.c
  ovl: detect overlapping layers
  drm/i915/icl: Add WaDisableBankHangMode
  ALSA: fireface: Use ULL suffixes for 64-bit constants
  signal/arm64: Use force_sig not force_sig_fault for SIGKILL
  nl80211: fill all policy .type entries
  mac80211: free peer keys before vif down in mesh
  ANDROID: ABI out: Use the extension .xml rather then .out
  drm/mediatek: respect page offset for PRIME mmap calls
  drm/mediatek: adjust ddp clock control flow
  ALSA: hda/realtek - Improve the headset mic for Acer Aspire laptops
  KVM: PPC: Book3S HV: XIVE: Fix the enforced limit on the vCPU identifier
  KVM: PPC: Book3S HV: XIVE: Do not test the EQ flag validity when resetting
  KVM: PPC: Book3S HV: XIVE: Clear file mapping when device is released
  KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
  KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list
  KVM: PPC: Book3S HV: Use new mutex to synchronize MMU setup
  KVM: PPC: Book3S HV: Avoid touching arch.mmu_ready in XIVE release functions
  Revert "drivers: thermal: tsens: Add new operation to check if a sensor is enabled"
  net/mlx5e: Disable rxhash when CQE compress is enabled
  net/mlx5e: restrict the real_dev of vlan device is the same as uplink device
  net/mlx5: Allocate root ns memory using kzalloc to match kfree
  net/mlx5: Avoid double free in fs init error unwinding path
  net/mlx5: Avoid double free of root ns in the error flow path
  net/mlx5: Fix error handling in mlx5_load()
  Documentation: net-sysfs: Remove duplicate PHY device documentation
  llc: fix skb leak in llc_build_and_send_ui_pkt()
  selftests: pmtu: Fix encapsulating device in pmtu_vti6_link_change_mtu
  dfs_cache: fix a wrong use of kfree in flush_cache_ent()
  fs/cifs/smb2pdu.c: fix buffer free in SMB2_ioctl_free
  cifs: fix memory leak of pneg_inbuf on -EOPNOTSUPP ioctl case
  xenbus: Avoid deadlock during suspend due to open transactions
  xen/pvcalls: Remove set but not used variable
  tracing: Avoid memory leak in predicate_parse()
  habanalabs: fix bug in checking huge page optimization
  mmc: sdhci: Fix SDIO IRQ thread deadlock
  dpaa_eth: use only online CPU portals
  net: mvneta: Fix err code path of probe
  net: stmmac: Do not output error on deferred probe
  Btrfs: fix race updating log root item during fsync
  Btrfs: fix wrong ctime and mtime of a directory after log replay
  ARC: [plat-hsdk] Get rid of inappropriate PHY settings
  ARC: [plat-hsdk]: Add support of Vivante GPU
  ARC: [plat-hsdk]: enable creg-gpio controller
  Btrfs: fix fsync not persisting changed attributes of a directory
  btrfs: qgroup: Check bg while resuming relocation to avoid NULL pointer dereference
  btrfs: reloc: Also queue orphan reloc tree for cleanup to avoid BUG_ON()
  Btrfs: incremental send, fix emission of invalid clone operations
  Btrfs: incremental send, fix file corruption when no-holes feature is enabled
  btrfs: correct zstd workspace manager lock to use spin_lock_bh()
  btrfs: Ensure replaced device doesn't have pending chunk allocation
  ia64: fix build errors by exporting paddr_to_nid()
  ASoC: SOF: Intel: hda: fix the hda init chip
  ASoC: SOF: ipc: fix a race, leading to IPC timeouts
  ASoC: SOF: control: correct the copy size for bytes kcontrol put
  ASoC: SOF: pcm: remove warning - initialize workqueue on open
  ASoC: SOF: pcm: clear hw_params_upon_resume flag correctly
  ASoC: SOF: core: fix error handling with the probe workqueue
  ASoC: SOF: core: remove snd_soc_unregister_component in case of error
  ASoC: SOF: core: remove DSP after unregistering machine driver
  ASoC: soc-core: fixup references at soc_cleanup_card_resources()
  arm64/module: revert to unsigned interpretation of ABS16/32 relocations
  KVM: s390: Do not report unusabled IDs via KVM_CAP_MAX_VCPU_ID
  kvm: fix compile on s390 part 2
  xprtrdma: Use struct_size() in kzalloc()
  tools headers UAPI: Sync kvm.h headers with the kernel sources
  perf record: Fix s390 missing module symbol and warning for non-root users
  perf machine: Read also the end of the kernel
  perf test vmlinux-kallsyms: Ignore aliases to _etext when searching on kallsyms
  perf session: Add missing swap ops for namespace events
  perf namespace: Protect reading thread's namespace
  tools headers UAPI: Sync drm/drm.h with the kernel
  s390/crypto: fix gcm-aes-s390 selftest failures
  s390/zcrypt: Fix wrong dispatching for control domain CPRBs
  s390/pci: fix assignment of bus resources
  s390/pci: fix struct definition for set PCI function
  s390: mark __cpacf_check_opcode() and cpacf_query_func() as __always_inline
  s390: add unreachable() to dump_fault_info() to fix -Wmaybe-uninitialized
  tools headers UAPI: Sync drm/i915_drm.h with the kernel
  tools headers UAPI: Sync linux/fs.h with the kernel
  tools headers UAPI: Sync linux/sched.h with the kernel
  tools arch x86: Sync asm/cpufeatures.h with the with the kernel
  tools include UAPI: Update copy of files related to new fspick, fsmount, fsconfig, fsopen, move_mount and open_tree syscalls
  perf arm64: Fix mksyscalltbl when system kernel headers are ahead of the kernel
  perf data: Fix 'strncat may truncate' build failure with recent gcc
  arm64: Fix the arm64_personality() syscall wrapper redirection
  rtw88: Make some symbols static
  rtw88: avoid circular locking between local->iflist_mtx and rtwdev->mutex
  rsi: Properly initialize data in rsi_sdio_ta_reset
  rtw88: fix unassigned rssi_level in rtw_sta_info
  rtw88: fix subscript above array bounds compiler warning
  fuse: extract helper for range writeback
  fuse: fix copy_file_range() in the writeback case
  mmc: meson-gx: fix irq ack
  mmc: tmio: fix SCC error handling to avoid false positive CRC error
  mmc: tegra: Fix a warning message
  memstick: mspro_block: Fix an error code in mspro_block_issue_req()
  mac80211: mesh: fix RCU warning
  nl80211: fix station_info pertid memory leak
  mac80211: Do not use stack memory with scatterlist for GMAC
  ALSA: line6: Assure canceling delayed work at disconnection
  configfs: Fix use-after-free when accessing sd->s_dentry
  ALSA: hda - Force polling mode on CNL for fixing codec communication
  i2c: synquacer: fix synquacer_i2c_doxfer() return value
  i2c: mlxcpld: Fix wrong initialization order in probe
  i2c: dev: fix potential memory leak in i2cdev_ioctl_rdwr
  RDMA/core: Fix panic when port_data isn't initialized
  RDMA/uverbs: Pass udata on uverbs error unwind
  RDMA/core: Clear out the udata before error unwind
  net: aquantia: tcp checksum 0xffff being handled incorrectly
  net: aquantia: fix LRO with FCS error
  net: aquantia: check rx csum for all packets in LRO session
  net: aquantia: tx clean budget logic error
  vhost: scsi: add weight support
  vhost: vsock: add weight support
  vhost_net: fix possible infinite loop
  vhost: introduce vhost_exceeds_weight()
  virtio: Fix indentation of VIRTIO_MMIO
  virtio: add unlikely() to WARN_ON_ONCE()
  iommu/vt-d: Set the right field for Page Walk Snoop
  iommu/vt-d: Fix lock inversion between iommu->lock and device_domain_lock
  iommu: Add missing new line for dma type
  drm/etnaviv: lock MMU while dumping core
  block: Don't revalidate bdev of hidden gendisk
  loop: Don't change loop device under exclusive opener
  drm/imx: ipuv3-plane: fix atomic update status query for non-plus i.MX6Q
  drm/qxl: drop WARN_ONCE()
  iio: temperature: mlx90632 Relax the compatibility check
  iio: imu: st_lsm6dsx: fix PM support for st_lsm6dsx i2c controller
  staging:iio:ad7150: fix threshold mode config bit
  fuse: add FUSE_WRITE_KILL_PRIV
  fuse: fallocate: fix return with locked inode
  PCI: PM: Avoid possible suspend-to-idle issue
  ACPI: PM: Call pm_set_suspend_via_firmware() during hibernation
  ACPI/PCI: PM: Add missing wakeup.flags.valid checks
  ovl: support the FS_IOC_FS[SG]ETXATTR ioctls
  soundwire: stream: fix out of boundary access on port properties
  net: tulip: de4x5: Drop redundant MODULE_DEVICE_TABLE()
  selftests/tls: add test for sleeping even though there is data
  net/tls: fix no wakeup on partial reads
  selftests/tls: test for lowat overshoot with multiple records
  net/tls: fix lowat calculation if some data came from previous record
  dpaa2-eth: Make constant 64-bit long
  dpaa2-eth: Use PTR_ERR_OR_ZERO where appropriate
  dpaa2-eth: Fix potential spectre issue
  bonding/802.3ad: fix slave link initialization transition states
  io_uring: Fix __io_uring_register() false success
  net: ethtool: Document get_rxfh_context and set_rxfh_context ethtool ops
  net: stmmac: dwmac-mediatek: modify csr_clk value to fix mdio read/write fail
  net: stmmac: fix csr_clk can't be zero issue
  net: stmmac: update rx tail pointer register to fix rx dma hang issue.
  ip_sockglue: Fix missing-check bug in ip_ra_control()
  ipv6_sockglue: Fix a missing-check bug in ip6_ra_control()
  efi: Allow the number of EFI configuration tables entries to be zero
  efi/x86/Add missing error handling to old_memmap 1:1 mapping code
  parisc: Fix compiler warnings in float emulation code
  parisc/slab: cleanup after /proc/slab_allocators removal
  bpf: sockmap, fix use after free from sleep in psock backlog workqueue
  net: sched: don't use tc_action->order during action dump
  cxgb4: Revert "cxgb4: Remove SGE_HOST_PAGE_SIZE dependency on page size"
  net: fec: fix the clk mismatch in failed_reset path
  habanalabs: Avoid using a non-initialized MMU cache mutex
  habanalabs: fix debugfs code
  uapi/habanalabs: add opcode for enable/disable device debug mode
  habanalabs: halt debug engines on user process close
  selftests: rtc: rtctest: specify timeouts
  selftests/harness: Allow test to configure timeout
  selftests/ftrace: Add checkbashisms meta-testcase
  selftests/ftrace: Make a script checkbashisms clean
  media: smsusb: better handle optional alignment
  test_firmware: Use correct snprintf() limit
  genwqe: Prevent an integer overflow in the ioctl
  parport: Fix mem leak in parport_register_dev_model
  fpga: dfl: expand minor range when registering chrdev region
  fpga: dfl: Add lockdep classes for pdata->lock
  fpga: dfl: afu: Pass the correct device to dma_mapping_error()
  fpga: stratix10-soc: fix use-after-free on s10_init()
  w1: ds2408: Fix typo after 49695ac468 (reset on output_write retry with readback)
  kheaders: Do not regenerate archive if config is not changed
  kheaders: Move from proc to sysfs
  drm/amd/display: Don't load DMCU for Raven 1 (v2)
  drm/i915: Maintain consistent documentation subsection ordering
  scripts/sphinx-pre-install: make it handle Sphinx versions
  docs: Fix conf.py for Sphinx 2.0
  vt/fbcon: deinitialize resources in visual_init() after failed memory allocation
  xfs: fix broken log reservation debugging
  clocksource/drivers/timer-ti-dm: Change to new style declaration
  ASoC: core: lock client_mutex while removing link components
  ASoC: simple-card: Restore original configuration of DAI format
  {nl,mac}80211: allow 4addr AP operation on crypto controlled devices
  mac80211_hwsim: mark expected switch fall-through
  mac80211: fix rate reporting inside cfg80211_calculate_bitrate_he()
  mac80211: remove set but not used variable 'old'
  mac80211: handle deauthentication/disassociation from TDLS peer
  gpio: fix gpio-adp5588 build errors
  pinctrl: stmfx: Fix compile issue when CONFIG_OF_GPIO is not defined
  staging: kpc2000: Add dependency on MFD_CORE to kconfig symbol 'KPC2000'
  perf/ring-buffer: Use regular variables for nesting
  perf/ring-buffer: Always use {READ,WRITE}_ONCE() for rb->user_page data
  perf/ring_buffer: Add ordering to rb->nest increment
  perf/ring_buffer: Fix exposing a temporarily decreased data_head
  x86/CPU/AMD: Don't force the CPB cap when running under a hypervisor
  x86/boot: Provide KASAN compatible aliases for string routines
  ALSA: hda/realtek - Enable micmute LED for Huawei laptops
  Input: uinput - add compat ioctl number translation for UI_*_FF_UPLOAD
  Input: silead - add MSSL0017 to acpi_device_id
  cxgb4: offload VLAN flows regardless of VLAN ethtype
  hsr: fix don't prune the master node from the node_db
  net: mvpp2: cls: Fix leaked ethtool_rx_flow_rule
  docs: fix multiple doc build warnings in enumeration.rst
  lib/list_sort: fix kerneldoc build error
  docs: fix numaperf.rst and add it to the doc tree
  doc: Cope with the deprecation of AutoReporter
  doc: Cope with Sphinx logging deprecations
  bpf: sockmap, restore sk_write_space when psock gets dropped
  selftests: bpf: add zero extend checks for ALU32 and/or/xor
  bpf, riscv: clear target register high 32-bits for and/or/xor on ALU32
  spi: abort spi_sync if failed to prepare_transfer_hardware
  ALSA: hda/realtek - Set default power save node to 0
  ipv4/igmp: fix build error if !CONFIG_IP_MULTICAST
  powerpc/kexec: Fix loading of kernel + initramfs with kexec_file_load()
  MIPS: TXx9: Fix boot crash in free_initmem()
  MIPS: remove a space after -I to cope with header search paths for VDSO
  MIPS: mark ginvt() as __always_inline
  ipv4/igmp: fix another memory leak in igmpv3_del_delrec()
  bnxt_en: Device serial number is supported only for PFs.
  bnxt_en: Reduce memory usage when running in kdump kernel.
  bnxt_en: Fix possible BUG() condition when calling pci_disable_msix().
  bnxt_en: Fix aggregation buffer leak under OOM condition.
  ipv6: Fix redirect with VRF
  net: stmmac: fix reset gpio free missing
  mISDN: make sure device name is NUL terminated
  net: macb: save/restore the remaining registers and features
  media: dvb: warning about dvb frequency limits produces too much noise
  net/tls: don't ignore netdev notifications if no TLS features
  net/tls: fix state removal with feature flags off
  net/tls: avoid NULL-deref on resync during device removal
  Documentation: add TLS offload documentation
  Documentation: tls: RSTify the ktls documentation
  Documentation: net: move device drivers docs to a submenu
  mISDN: Fix indenting in dsp_cmx.c
  ocelot: Dont allocate another multicast list, use __dev_mc_sync
  Validate required parameters in inet6_validate_link_af
  xhci: Use %zu for printing size_t type
  xhci: Convert xhci_handshake() to use readl_poll_timeout_atomic()
  xhci: Fix immediate data transfer if buffer is already DMA mapped
  usb: xhci: avoid null pointer deref when bos field is NULL
  usb: xhci: Fix a potential null pointer dereference in xhci_debugfs_create_endpoint()
  xhci: update bounce buffer with correct sg num
  media: usb: siano: Fix false-positive "uninitialized variable" warning
  spi: spi-fsl-spi: call spi_finalize_current_message() at the end
  ALSA: hda/realtek - Check headset type by unplug and resume
  powerpc/perf: Fix MMCRA corruption by bhrb_filter
  powerpc/powernv: Return for invalid IMC domain
  HID: logitech-hidpp: Add support for the S510 remote control
  HID: multitouch: handle faulty Elo touch device
  selftests: netfilter: add flowtable test script
  netfilter: nft_flow_offload: IPCB is only valid for ipv4 family
  netfilter: nft_flow_offload: don't offload when sequence numbers need adjustment
  netfilter: nft_flow_offload: set liberal tracking mode for tcp
  netfilter: nf_flow_table: ignore DF bit setting
  ASoC: Intel: sof-rt5682: fix AMP quirk support
  ASoC: Intel: sof-rt5682: fix for codec button mapping
  clk: ti: clkctrl: Fix clkdm_clk handling
  clk: imx: imx8mm: fix int pll clk gate
  clk: sifive: restrict Kconfig scope for the FU540 PRCI driver
  RDMA/hns: Fix PD memory leak for internal allocation
  netfilter: nat: fix udp checksum corruption
  selftests: netfilter: missing error check when setting up veth interface
  RDMA/srp: Rename SRP sysfs name after IB device rename trigger
  ipvs: Fix use-after-free in ip_vs_in
  ARC: [plat-hsdk]: Add missing FIFO size entry in GMAC node
  ARC: [plat-hsdk]: Add missing multicast filter bins number to GMAC node
  samples, bpf: suppress compiler warning
  samples, bpf: fix to change the buffer size for read()
  bpf: Check sk_fullsock() before returning from bpf_sk_lookup()
  bpf: fix out-of-bounds read in __bpf_skc_lookup
  Documentation/networking: fix af_xdp.rst Sphinx warnings
  netfilter: nft_fib: Fix existence check support
  netfilter: nf_queue: fix reinject verdict handling
  dmaengine: sprd: Add interrupt support for 2-stage transfer
  dmaengine: sprd: Fix the right place to configure 2-stage transfer
  dmaengine: sprd: Fix block length overflow
  dmaengine: sprd: Fix the incorrect start for 2-stage destination channels
  dmaengine: sprd: Add validation of current descriptor in irq handler
  dmaengine: sprd: Fix the possible crash when getting descriptor status
  tty: max310x: Fix external crystal register setup
  serial: sh-sci: disable DMA for uart_console
  serial: imx: remove log spamming error message
  tty: serial: msm_serial: Fix XON/XOFF
  USB: serial: option: add Telit 0x1260 and 0x1261 compositions
  USB: serial: pl2303: add Allied Telesis VT-Kit3
  USB: serial: option: add support for Simcom SIM7500/SIM7600 RNDIS mode
  dmaengine: tegra210-adma: Fix spelling
  dmaengine: tegra210-adma: Fix channel FIFO configuration
  dmaengine: tegra210-adma: Fix crash during probe
  dmaengine: mediatek-cqdma: sleeping in atomic context
  dmaengine: dw-axi-dmac: fix null dereference when pointer first is null
  perf/x86/intel/ds: Fix EVENT vs. UEVENT PEBS constraints
  USB: rio500: update Documentation
  USB: rio500: simplify locking
  USB: rio500: fix memory leak in close after disconnect
  USB: rio500: refuse more than one device at a time
  usbip: usbip_host: fix BUG: sleeping function called from invalid context
  USB: sisusbvga: fix oops in error path of sisusb_probe
  USB: Add LPM quirk for Surface Dock GigE adapter
  media: usb: siano: Fix general protection fault in smsusb
  usb: mtu3: fix up undefined reference to usb_debug_root
  USB: Fix slab-out-of-bounds write in usb_get_bos_descriptor
  Input: elantech - enable middle button support on 2 ThinkPads
  dmaengine: fsl-qdma: Add improvement
  dmaengine: jz4780: Fix transfers being ACKed too soon
  gcc-plugins: Fix build failures under Darwin host
  MAINTAINERS: Update Stefan Wahren email address
  netfilter: nf_tables: fix oops during rule dump
  ARC: mm: SIGSEGV userspace trying to access kernel virtual memory
  ARC: fix build warnings
  ARM: dts: bcm: Add missing device_type = "memory" property
  soc: bcm: brcmstb: biuctrl: Register writes require a barrier
  soc: brcmstb: Fix error path for unsupported CPUs
  ARM: dts: dra71x: Disable usb4_tm target module
  ARM: dts: dra71x: Disable rtc target module
  ARM: dts: dra76x: Disable usb4_tm target module
  ARM: dts: dra76x: Disable rtc target module
  ASoC: simple-card: Fix configuration of DAI format
  ASoC: Intel: soc-acpi: Fix machine selection order
  ASoC: rt5677-spi: Handle over reading when flipping bytes
  ASoC: soc-dpm: fixup DAI active unbalance
  pinctrl: intel: Clear interrupt status in mask/unmask callback
  pinctrl: intel: Use GENMASK() consistently
  parisc: Allow building 64-bit kernel without -mlong-calls compiler option
  parisc: Kconfig: remove ARCH_DISCARD_MEMBLOCK
  staging: wilc1000: Fix some double unlock bugs in wilc_wlan_cleanup()
  staging: vc04_services: prevent integer overflow in create_pagelist()
  Staging: vc04_services: Fix a couple error codes
  staging: wlan-ng: fix adapter initialization failure
  staging: kpc2000: double unlock in error handling in kpc_dma_transfer()
  staging: kpc2000: Fix build error without CONFIG_UIO
  staging: kpc2000: fix build error on xtensa
  staging: erofs: set sb->s_root to NULL when failing from __getname()
  ARM: imx: cpuidle-imx6sx: Restrict the SW2ISO increase to i.MX6SX
  firmware: imx: SCU irq should ONLY be enabled after SCU IPC is ready
  arm64: imx: Fix build error without CONFIG_SOC_BUS
  ima: fix wrong signed policy requirement when not appraising
  x86/ima: Check EFI_RUNTIME_SERVICES before using
  stacktrace: Unbreak stack_trace_save_tsk_reliable()
  HID: wacom: Sync INTUOSP2_BT touch state after each frame if necessary
  HID: wacom: Correct button numbering 2nd-gen Intuos Pro over Bluetooth
  HID: wacom: Send BTN_TOUCH in response to INTUOSP2_BT eraser contact
  HID: wacom: Don't report anything prior to the tool entering range
  HID: wacom: Don't set tool type until we're in range
  ASoC: cs42xx8: Add regcache mask dirty
  regulator: tps6507x: Fix boot regression due to testing wrong init_data pointer
  ASoC: fsl_asrc: Fix the issue about unsupported rate
  spi: bitbang: Fix NULL pointer dereference in spi_unregister_master
  Input: elan_i2c - increment wakeup count if wake source
  wireless: Skip directory when generating certificates
  ASoC: ak4458: rstn_control - return a non-zero on error only
  ASoC: soc-pcm: BE dai needs prepare when pause release after resume
  ASoC: ak4458: add return value for ak4458_probe
  ASoC : cs4265 : readable register too low
  ASoC: SOF: fix error in verbose ipc command parsing
  ASoC: SOF: fix race in FW boot timeout handling
  ASoC: SOF: nocodec: fix undefined reference
  iio: adc: ti-ads8688: fix timestamp is not updated in buffer
  iio: dac: ds4422/ds4424 fix chip verification
  HID: rmi: Use SET_REPORT request on control endpoint for Acer Switch 3 and 5
  HID: logitech-hidpp: add support for the MX5500 keyboard
  HID: logitech-dj: add support for the Logitech MX5500's Bluetooth Mini-Receiver
  HID: i2c-hid: add iBall Aer3 to descriptor override
  spi: Fix Raspberry Pi breakage
  ARM: dts: dra76x: Update MMC2_HS200_MANUAL1 iodelay values
  ARM: dts: am57xx-idk: Remove support for voltage switching for SD card
  bus: ti-sysc: Handle devices with no control registers
  ARM: dts: Configure osc clock for d_can on am335x
  iio: imu: mpu6050: Fix FIFO layout for ICM20602
  lkdtm/bugs: Adjust recursion test to avoid elision
  lkdtm/usercopy: Moves the KERNEL_DS test to non-canonical
  iio: adc: ads124: avoid buffer overflow
  iio: adc: modify NPCM ADC read reference voltage

Change-Id: I98c823993370027391cc21dfb239c3049f025136
Signed-off-by: Raghavendra Rao Ananta <rananta@codeaurora.org>
2019-07-01 17:41:24 -07:00

3584 lines
95 KiB
C

// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_configfs.c
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
* (c) Copyright 2008-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* based on configfs Copyright (C) 2005 Oracle. All rights reserved.
*
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"
/*
 * Generate a target_core_setup_<name>_cit() helper that initializes the
 * backend's tb_<name>_cit config_item_type with a caller-supplied,
 * generic attribute array (_attrs).
 */
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
/*
 * Like TB_CIT_SETUP(), but the attribute array comes from the backend
 * driver itself (tb->ops->tb_<name>_attrs) instead of a generic table.
 */
#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
/* Default ALUA logical unit group, defined elsewhere in the target core */
extern struct t10_alua_lu_gp *default_lu_gp;

/* List of registered fabric templates; both protected by g_tf_lock */
static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

/* Top-level configfs groups created at module init */
static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
/* Map a configfs item back to its containing se_hba via hba_group */
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
 * Attributes for /sys/kernel/config/target/
 */

/*
 * "version" (read-only): report the target core version together with the
 * kernel's sysname/machine and UTS_RELEASE it was built against.
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);
/*
 * Current database root path; may only be changed via the "dbroot"
 * attribute before any fabric driver has registered (see the store
 * handler below).
 */
char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
/* Staging buffer so a failed validation never clobbers the live db_root */
static char db_root_stage[DB_ROOT_LEN];

/* "dbroot" (read): print the current db_root */
static ssize_t target_core_item_dbroot_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", db_root);
}
/*
 * "dbroot" (write): stage a new database root path, validate that it is
 * an existing directory, then commit it to db_root.
 *
 * The change is refused once any fabric driver has registered (g_tf_list
 * non-empty), and -EINVAL is returned for oversized, empty, unopenable or
 * non-directory paths.  Returns the number of bytes consumed on success.
 */
static ssize_t target_core_item_dbroot_store(struct config_item *item,
		const char *page, size_t count)
{
	ssize_t read_bytes;
	ssize_t ret = -EINVAL;
	struct file *fp;

	mutex_lock(&g_tf_lock);
	if (!list_empty(&g_tf_list)) {
		pr_err("db_root: cannot be changed: target drivers registered");
		goto out_unlock;
	}
	/* Reserve one byte for the terminating NUL */
	if (count > (DB_ROOT_LEN - 1)) {
		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
		       (int)count, DB_ROOT_LEN - 1);
		goto out_unlock;
	}
	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
	if (!read_bytes)
		goto out_unlock;

	/* Strip a trailing newline, as written by "echo path > dbroot" */
	if (db_root_stage[read_bytes - 1] == '\n')
		db_root_stage[read_bytes - 1] = '\0';

	/* validate new db root before accepting it */
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		goto out_unlock;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a directory: %s\n", db_root_stage);
		goto out_unlock;
	}
	filp_close(fp, NULL);

	/*
	 * Commit with snprintf() rather than the previous
	 * strncpy(db_root, db_root_stage, read_bytes): when the input had
	 * no trailing newline, strncpy() copied read_bytes bytes without a
	 * NUL, leaving the tail of any previous, longer db_root appended
	 * to the new value.  snprintf() always NUL-terminates.
	 */
	snprintf(db_root, DB_ROOT_LEN, "%s", db_root_stage);
	ret = read_bytes;

	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);

out_unlock:
	mutex_unlock(&g_tf_lock);
	return ret;
}
CONFIGFS_ATTR(target_core_item_, dbroot);
/*
 * Look up a registered fabric template by its alias (preferred) or name.
 * On success a reference is taken via tf->tf_access_cnt before the global
 * lock is dropped; returns NULL when no template matches.
 */
static struct target_fabric_configfs *target_core_get_fabric(
		const char *name)
{
	struct target_fabric_configfs *tf;
	struct target_fabric_configfs *found = NULL;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		const char *cmp_name = tf->tf_ops->fabric_alias ?
				tf->tf_ops->fabric_alias :
				tf->tf_ops->fabric_name;

		if (strcmp(cmp_name, name) == 0) {
			/* Pin the template while the caller uses it */
			atomic_inc(&tf->tf_access_cnt);
			found = tf;
			break;
		}
	}
	mutex_unlock(&g_tf_lock);

	return found;
}
/*
* Called from struct target_core_group_ops->make_group()
*/
/*
 * Handle mkdir(2) of /sys/kernel/config/target/<name>: locate the fabric
 * template for <name>, auto-loading well-known fabric modules if needed,
 * and return its top-level config_group (or ERR_PTR(-EINVAL)).
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
		" %s\n", group, name);

	/* First attempt; on success this takes a tf_access_cnt reference */
	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following is
		 * called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can
		 * be registered, but simply provides auto loading logic for
		 * mkdir(2) system calls with known TCM fabric modules.
		 */
		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module
			 * when the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					" iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when
			 * the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					" tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		/* Retry the lookup now that the module may have registered */
		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
		&tf->tf_wwn_cit);

	/* Expose the fabric's WWN group plus its discovery_auth subgroup */
	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);
	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
		 config_item_name(&tf->tf_group.cg_item));
	return &tf->tf_group;
}
/*
* Called from struct target_core_group_ops->drop_item()
*/
/*
 * Handle rmdir(2) of /sys/kernel/config/target/<name>: drop the usage
 * reference taken at registration and tear down the fabric's default
 * groups.  The template itself stays on g_tf_list until the driver calls
 * target_unregister_template().
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	/* Release the reference taken by target_core_get_fabric() */
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
		" %s\n", config_item_name(item));

	configfs_remove_default_groups(&tf->tf_group);
	config_item_put(item);
}
/* mkdir(2)/rmdir(2) under /sys/kernel/config/target/ route to these ops */
static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	&target_core_item_attr_dbroot,
	NULL,
};
/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static const struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

/* Root configfs subsystem backing /sys/kernel/config/target/ */
static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};
/*
 * Pin @item so it cannot be removed from configfs while the target core
 * still references it.  Paired with target_undepend_item().  Returns 0
 * or a negative errno from configfs_depend_item().
 */
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);
/*
 * Drop a configfs dependency previously taken via target_depend_item(),
 * making @item removable again.
 */
void target_undepend_item(struct config_item *item)
{
	/*
	 * Plain call instead of "return configfs_undepend_item(item);":
	 * ISO C forbids a return statement with an expression in a function
	 * returning void (C11 6.8.6.4).
	 */
	configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
if (tfo->fabric_alias) {
if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
pr_err("Passed alias: %s exceeds "
"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
return -EINVAL;
}
}
if (!tfo->fabric_name) {
pr_err("Missing tfo->fabric_name\n");
return -EINVAL;
}
if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
pr_err("Passed name: %s exceeds "
"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
return -EINVAL;
}
if (!tfo->tpg_get_wwn) {
pr_err("Missing tfo->tpg_get_wwn()\n");
return -EINVAL;
}
if (!tfo->tpg_get_tag) {
pr_err("Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
if (!tfo->tpg_check_demo_mode) {
pr_err("Missing tfo->tpg_check_demo_mode()\n");
return -EINVAL;
}
if (!tfo->tpg_check_demo_mode_cache) {
pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
return -EINVAL;
}
if (!tfo->tpg_check_demo_mode_write_protect) {
pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
return -EINVAL;
}
if (!tfo->tpg_check_prod_mode_write_protect) {
pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
return -EINVAL;
}
if (!tfo->tpg_get_inst_index) {
pr_err("Missing tfo->tpg_get_inst_index()\n");
return -EINVAL;
}
if (!tfo->release_cmd) {
pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
if (!tfo->sess_get_index) {
pr_err("Missing tfo->sess_get_index()\n");
return -EINVAL;
}
if (!tfo->write_pending) {
pr_err("Missing tfo->write_pending()\n");
return -EINVAL;
}
if (!tfo->set_default_node_attributes) {
pr_err("Missing tfo->set_default_node_attributes()\n");
return -EINVAL;
}
if (!tfo->get_cmd_state) {
pr_err("Missing tfo->get_cmd_state()\n");
return -EINVAL;
}
if (!tfo->queue_data_in) {
pr_err("Missing tfo->queue_data_in()\n");
return -EINVAL;
}
if (!tfo->queue_status) {
pr_err("Missing tfo->queue_status()\n");
return -EINVAL;
}
if (!tfo->queue_tm_rsp) {
pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
if (!tfo->aborted_task) {
pr_err("Missing tfo->aborted_task()\n");
return -EINVAL;
}
if (!tfo->check_stop_free) {
pr_err("Missing tfo->check_stop_free()\n");
return -EINVAL;
}
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
* target_core_fabric_configfs.c WWN+TPG group context code.
*/
if (!tfo->fabric_make_wwn) {
pr_err("Missing tfo->fabric_make_wwn()\n");
return -EINVAL;
}
if (!tfo->fabric_drop_wwn) {
pr_err("Missing tfo->fabric_drop_wwn()\n");
return -EINVAL;
}
if (!tfo->fabric_make_tpg) {
pr_err("Missing tfo->fabric_make_tpg()\n");
return -EINVAL;
}
if (!tfo->fabric_drop_tpg) {
pr_err("Missing tfo->fabric_drop_tpg()\n");
return -EINVAL;
}
return 0;
}
/*
 * target_register_template - register a fabric driver's ops template
 * @fo: fabric ops table; validated by target_fabric_tf_ops_check()
 *
 * Allocates the per-fabric configfs state, sets up its config_item_types
 * and adds it to the global fabric list.  Returns 0 on success or a
 * negative errno.
 */
int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	/*
	 * sizeof(*tf) ties the allocation size to the variable's type; no
	 * failure message needed — the allocator already logs OOM.
	 */
	tf = kzalloc(sizeof(*tf), GFP_KERNEL);
	if (!tf)
		return -ENOMEM;

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);
/*
 * target_unregister_template - remove a fabric template from the global list
 * @fo: ops table whose fabric_name identifies the template to remove
 *
 * Must only be called once all configfs references are gone (enforced by
 * the BUG_ON below).  Silently returns if no matching template is found.
 */
void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			/*
			 * Drop the lock before the potentially long
			 * rcu_barrier(); t is already off the list so no
			 * one else can reach it.
			 */
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
/* Map a configfs item back to the se_dev_attrib that embeds its group. */
static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	struct config_group *grp = to_config_group(item);

	return container_of(grp, struct se_dev_attrib, da_group);
}
/* Start functions for struct config_item_type tb_dev_attrib_cit */

/*
 * Generate a configfs "show" handler that prints the named se_dev_attrib
 * field as an unsigned decimal value followed by a newline.
 */
#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
static ssize_t _name##_show(struct config_item *item, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}

DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
/*
 * Generate a configfs "store" handler that parses the page as a u32
 * (kstrtou32, base auto-detected from the 0/0x prefix) and assigns it,
 * without further range checking, to the named se_dev_attrib field.
 */
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = val;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
/*
 * Generate a configfs "store" handler that parses the page as a boolean
 * (strtobool accepts 0/1/y/n/Y/N) and assigns the result to the named
 * se_dev_attrib field.
 */
#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page, \
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	bool flag;							\
	int ret;							\
									\
	ret = strtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = flag;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
/*
 * Generate a configfs "store" handler for deprecated attributes: accept
 * and silently ignore the write (returning success keeps old userspace
 * tooling working), warning once per boot.
 */
#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
/*
 * Copy the backstore's configfs directory name into the emulated T10
 * INQUIRY Model field, warning if the name has to be truncated.
 */
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
			configname);
	}
	/*
	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
	 * here without potentially breaking existing setups, so continue to
	 * truncate one byte shorter than what can be carried in INQUIRY.
	 */
	strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}
/*
 * Store handler for emulate_model_alias: when enabled, replace the T10
 * INQUIRY Model with the backstore's configfs name; when disabled,
 * restore the backend plugin's default product string.  Rejected while
 * the device is exported to any fabric.
 */
static ssize_t emulate_model_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/* Compile-time guard: model buffer must hold LEN chars + NUL. */
	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
			sizeof(dev->t10_wwn.model));
	}
	da->emulate_model_alias = flag;
	return count;
}
/*
 * Store handler for emulate_write_cache.  Enabling is refused when the
 * backend provides its own get_write_cache() callback, i.e. the device
 * reports a real write cache that must not be shadowed by emulation.
 */
static ssize_t emulate_write_cache_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}
/*
 * Store handler for emulate_ua_intlck_ctrl (SPC UA_INTLCK_CTRL field).
 * Only the values 0, 1 and 2 are valid, and the setting cannot change
 * while the device is exported.
 */
static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1 && val != 2) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		da->da_dev, val);
	return count;
}
/*
 * Store handler for emulate_tas (SAM TASK ABORTED status emulation).
 * Cannot be changed while the device is exported.
 */
static ssize_t emulate_tas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}
/*
 * Store handler for emulate_tpu (Thin Provisioning UNMAP).  Enabling
 * requires the backend to have reported Discard support by setting
 * max_unmap_block_desc_count non-zero at configure time.
 */
static ssize_t emulate_tpu_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		da->da_dev, flag);
	return count;
}
/*
 * Store handler for emulate_tpws (Thin Provisioning WRITE SAME w/UNMAP).
 * Mirrors emulate_tpu_store(): enabling requires backend Discard support.
 */
static ssize_t emulate_tpws_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				da->da_dev, flag);
	return count;
}
/*
 * Store handler for pi_prot_type (T10-PI DIF protection type).
 * Valid values: 0 (off), 1 and 3; TYPE2 is rejected.  The request is a
 * no-op when hardware PI is already active.  Transitions call into the
 * backend's init_prot()/free_prot() and keep pi_prot_verify in sync.
 */
static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		/* Hardware PI wins; report success without changing state. */
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		/* Enabling: let the backend allocate PI resources; roll
		 * back on failure. */
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			da->pi_prot_verify = (bool) da->pi_prot_type;
			return ret;
		}

	} else if (!flag && old_prot) {
		/* Disabling: release backend PI resources. */
		dev->transport->free_prot(dev);
	}

	da->pi_prot_verify = (bool) da->pi_prot_type;
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}
/* always zero, but attr needs to remain RW to avoid userspace breakage */
static ssize_t pi_prot_format_show(struct config_item *item, char *page)
{
	/* One-shot operation; there is no persistent state to report. */
	return snprintf(page, PAGE_SIZE, "0\n");
}
/*
 * Store handler for pi_prot_format: writing a true value triggers a
 * one-shot protection-information format via the backend's
 * format_prot() callback.  Writing false is a successful no-op.
 * Requires a configured, unexported device and backend support.
 */
static ssize_t pi_prot_format_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}
/*
 * Store handler for pi_prot_verify.  Disabling is always allowed;
 * enabling is silently ignored (returns success) when hardware PI is
 * active or when no software PI type has been configured.
 */
static ssize_t pi_prot_verify_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag) {
		da->pi_prot_verify = flag;
		return count;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!da->pi_prot_type) {
		pr_warn("DIF protection not supported by backend, ignoring\n");
		return count;
	}
	da->pi_prot_verify = flag;

	return count;
}
/*
 * Store handler for force_pr_aptpl (always activate Persistence Across
 * Target Power Loss metadata writes).  Cannot change while exported.
 */
static ssize_t force_pr_aptpl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}
/*
 * Store handler for emulate_rest_reord (restricted reordering).  Only 0
 * is accepted; the emulation itself is not implemented, so enabling it
 * fails with -ENOSYS.
 *
 * Fix: use pr_err() instead of a raw printk(KERN_ERR ...) for
 * consistency with every other handler in this file (and so the
 * message picks up any pr_fmt() prefix).
 */
static ssize_t emulate_rest_reord_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
		       " reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		da->da_dev, flag);
	return count;
}
/*
 * Store handler for unmap_zeroes_data (reported as LBPRZ).  Cannot
 * change while exported, and enabling requires backend Discard support
 * (max_unmap_block_desc_count != 0).
 */
static ssize_t unmap_zeroes_data_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " unmap_zeroes_data while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_configure_device().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
		       " because max_unmap_block_desc_count is zero\n",
		       da->da_dev);
		return -ENOSYS;
	}
	da->unmap_zeroes_data = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
		 da->da_dev, flag);
	return count;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
/*
 * Store handler for queue_depth.  Zero is rejected; raising the depth
 * above the current value is capped by the backend's hw_queue_depth.
 * Updates both the attribute copy and the live dev->queue_depth.
 */
static ssize_t queue_depth_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
		       " export_count is %d\n",
		       dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		/* Only an increase needs to be validated against hardware. */
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
			       " %u exceeds TCM/SE_Device MAX"
			       " TCQ: %u\n", dev, val,
			       dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;

	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}
/*
 * Store handler for optimal_sectors (reported in Block Limits VPD).
 * Cannot change while exported; capped by hw_max_sectors.
 */
static ssize_t optimal_sectors_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " optimal_sectors while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
		       " greater than hw_max_sectors: %u\n",
		       da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			da->da_dev, val);
	return count;
}
/*
 * Store handler for block_size.  Only 512/1024/2048/4096 are valid and
 * the device must not be exported.  Changing the block size rescales
 * hw_max_sectors when the backend declared a max_bytes_per_io limit.
 */
static ssize_t block_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
		       " while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		/* NOTE(review): message says "block_device" but the value
		 * being validated is block_size — looks like a typo in the
		 * log text; confirm before changing a grep-able string. */
		pr_err("dev[%p]: Illegal value for block_device: %u"
		       " for SE device, must be 512, 1024, 2048 or 4096\n",
		       da->da_dev, val);
		return -EINVAL;
	}

	da->block_size = val;
	if (da->max_bytes_per_io)
		da->hw_max_sectors = da->max_bytes_per_io / val;

	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			da->da_dev, val);
	return count;
}
/* Report 1 unless the backend passes ALUA through to the real device. */
static ssize_t alua_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 xport_flags = da->da_dev->transport->transport_flags;
	int supported = (xport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) ? 0 : 1;

	return snprintf(page, PAGE_SIZE, "%d\n", supported);
}
/* Report 1 unless the backend passes PGR (reservations) through. */
static ssize_t pgr_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 xport_flags = da->da_dev->transport->transport_flags;
	int supported = (xport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) ? 0 : 1;

	return snprintf(page, PAGE_SIZE, "%d\n", supported);
}
/*
 * Instantiate struct configfs_attribute objects (attr_<name>) bound to
 * the <name>_show/<name>_store handlers defined above.  CONFIGFS_ATTR_RO
 * generates read-only attributes with no store handler.
 */
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR_RO(, alua_support);
CONFIGFS_ATTR_RO(, pgr_support);
/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter. Any backend using spc_parse_cdb should be using
 * these.
 */
/* NULL-terminated, as required by configfs. */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_emulate_pr,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_pi_prot_verify,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
/* NULL-terminated, as required by configfs. */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
/* Generate the tb_dev_attrib/tb_dev_action config_item_type setup
 * helpers; the attribute arrays come from the backend driver. */
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */
/* Map a configfs item back to the t10_wwn that embeds its group. */
static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	struct config_group *grp = to_config_group(item);

	return container_of(grp, struct t10_wwn, t10_wwn_group);
}
/*
 * Validate an emulated INQUIRY string per SPC 4.3.1: ASCII data fields
 * shall contain only ASCII printable characters (code values 20h..7Eh)
 * and may be terminated with one or more ASCII null (00h) characters.
 *
 * Returns the string length on success, or -EINVAL when a
 * non-printable byte is found.
 */
static ssize_t target_check_inquiry_data(char *buf)
{
	size_t len = strlen(buf);
	size_t i;

	for (i = 0; i < len; i++) {
		char c = buf[i];

		if (c < 0x20 || c > 0x7E) {
			pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
			return -EINVAL;
		}
	}

	return len;
}
/*
* STANDARD and VPD page 0x83 T10 Vendor Identification
*/
static ssize_t target_wwn_vendor_id_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
}
/*
 * Store handler for wwn/vendor_id: set the emulated T10 INQUIRY Vendor
 * Identification string.  A trailing newline from userspace is
 * stripped; the value must be ASCII-printable and at most
 * INQUIRY_VENDOR_LEN bytes.  Rejected while the device is exported.
 */
static ssize_t target_wwn_vendor_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	/* strlcpy() returns strlen(page); >= sizeof(buf) means the input
	 * was truncated and is certainly too long, so skip stripping. */
	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_VENDOR_LEN) {
		pr_err("Emulated T10 Vendor Identification exceeds"
			" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist. If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Vendor Identification while"
			" active %d exports exist\n", dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
	strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
		 " %s\n", dev->t10_wwn.vendor);

	return count;
}
static ssize_t target_wwn_product_id_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
}
/*
 * Store handler for wwn/product_id: set the emulated T10 INQUIRY Model
 * (product identification) string.  A trailing newline from userspace
 * is stripped; the value must be ASCII-printable and at most
 * INQUIRY_MODEL_LEN bytes.  Rejected while the device is exported.
 *
 * Fix: the overflow error message said "T10 Vendor" (copy-paste from
 * the vendor_id handler) although this handler sets the Model field.
 */
static ssize_t target_wwn_product_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_MODEL_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_MODEL_LEN) {
		pr_err("Emulated T10 Model exceeds INQUIRY_MODEL_LEN: "
			 __stringify(INQUIRY_MODEL_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist. If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Model while active %d exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
		 dev->t10_wwn.model);

	return count;
}
static ssize_t target_wwn_revision_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
}
/*
 * Store handler for wwn/revision: set the emulated T10 INQUIRY Revision
 * string.  A trailing newline from userspace is stripped; the value
 * must be ASCII-printable and at most INQUIRY_REVISION_LEN bytes.
 * Rejected while the device is exported.
 */
static ssize_t target_wwn_revision_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_REVISION_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_REVISION_LEN) {
		pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
			 __stringify(INQUIRY_REVISION_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist. If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Revision while active %d exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
	strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
		 dev->t10_wwn.revision);

	return count;
}
/*
* VPD page 0x80 Unit serial
*/
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
char *page)
{
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
&to_t10_wwn(item)->unit_serial[0]);
}
/*
 * Store handler for wwn/vpd_unit_serial: set the emulated VPD page 0x80
 * Unit Serial Number.  Refused when the underlying SCSI device already
 * provides a firmware serial, when the value is too long, or while the
 * device is exported.  A trailing newline is stripped before storing.
 */
static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD. But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist. If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}
/*
 * VPD page 0x83 Protocol Identifier
 */
/*
 * Dump the protocol identifier of every registered VPD descriptor that
 * has one, stopping before the output would exceed PAGE_SIZE.
 */
static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
		char *page)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE];
	ssize_t len = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}
/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
/*
 * For each registered VPD descriptor matching _assoc, emit the
 * association, identifier type and identifier, truncating the whole
 * dump once the next chunk would exceed PAGE_SIZE.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
		char *page)						\
{									\
	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
	struct t10_vpd *vpd;						\
	unsigned char buf[VPD_TMP_BUF_SIZE];				\
	ssize_t len = 0;						\
									\
	spin_lock(&t10_wwn->t10_vpd_lock);				\
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
		if (vpd->association != _assoc)				\
			continue;					\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
	}								\
	spin_unlock(&t10_wwn->t10_vpd_lock);				\
									\
	return len;							\
}

/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
/* Instantiate the wwn/ group attributes and collect them for configfs. */
CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, product_id);
CONFIGFS_ATTR(target_wwn_, revision);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);

static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_wwn_attr_vendor_id,
	&target_wwn_attr_product_id,
	&target_wwn_attr_revision,
	&target_wwn_attr_vpd_unit_serial,
	&target_wwn_attr_vpd_protocol_identifier,
	&target_wwn_attr_vpd_assoc_logical_unit,
	&target_wwn_attr_vpd_assoc_target_port,
	&target_wwn_attr_vpd_assoc_scsi_target_device,
	NULL,
};

TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);

/* End functions for struct config_item_type tb_dev_wwn_cit */
/* Start functions for struct config_item_type tb_dev_pr_cit */

/* Map a configfs item back to the se_device that embeds its pr group. */
static struct se_device *pr_to_dev(struct config_item *item)
{
	struct config_group *grp = to_config_group(item);

	return container_of(grp, struct se_device, dev_pr_group);
}
/*
 * Format the current SPC-3 persistent reservation holder, if any.
 * Caller must hold dev->dev_reservation_lock (see
 * target_pr_res_holder_show()).
 */
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
		se_nacl->initiatorname, i_buf);
}
/*
 * Format the current SPC-2 reservation holder, if any.  Caller must
 * hold dev->dev_reservation_lock (see target_pr_res_holder_show()).
 */
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_session *sess = dev->reservation_holder;
	struct se_node_acl *nacl;

	if (!sess)
		return sprintf(page, "No SPC-2 Reservation holder\n");

	nacl = sess->se_node_acl;
	return sprintf(page,
		"SPC-2 Reservation: %s Initiator: %s\n",
		nacl->se_tpg->se_tpg_tfo->fabric_name,
		nacl->initiatorname);
}
/*
 * Show the current reservation holder, dispatching to the SPC-2 or
 * SPC-3 formatter under dev_reservation_lock.  Short-circuits when PR
 * emulation is disabled or the backend passes reservations through.
 */
static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	int ret;

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}
/*
 * Show whether the SPC-3 reservation holder registered via all target
 * ports (ALL_TG_PT) or a single port.
 */
static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}
	spin_unlock(&dev->dev_reservation_lock);

	return len;
}
static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
char *page)
{
return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
/*
 * Show the target-port endpoint (WWN, relative port id, TPG tag, LUN)
 * of the current SPC-3 reservation holder.
 */
static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);

	return len;
}
/*
 * List every SPC-3 PR registration (fabric, initiator, key, PRgen),
 * truncating once the next line would exceed PAGE_SIZE.
 */
static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	/* NOTE(review): sprintf() into this fixed 384-byte scratch buffer
	 * is unbounded; presumably initiatorname + i_buf always fit, but
	 * confirm against the fabric name-length limits. */
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->fabric_name,
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}
/* Show the SPC-3 reservation type held on the device, if any. */
static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct t10_pr_registration *holder;
	ssize_t len;

	spin_lock(&dev->dev_reservation_lock);
	holder = dev->dev_pr_res_holder;
	if (!holder)
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	else
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(holder->pr_res_type));
	spin_unlock(&dev->dev_reservation_lock);

	return len;
}
/*
 * Report which reservation model is in effect for the device:
 * disabled, fabric passthrough, SPC-2, or emulated SPC-3 persistent
 * reservations (checked in that priority order).
 */
static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const char *type;

	if (!dev->dev_attrib.emulate_pr)
		type = "SPC_RESERVATIONS_DISABLED";
	else if (dev->transport->transport_flags &
		 TRANSPORT_FLAG_PASSTHROUGH_PGR)
		type = "SPC_PASSTHROUGH";
	else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		type = "SPC2_RESERVATIONS";
	else
		type = "SPC3_PERSISTENT_RESERVATIONS";

	return sprintf(page, "%s\n", type);
}
/* Report whether APTPL metadata is currently active for this device. */
static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const char *status;

	/* Attribute is empty unless the core emulates SPC-3 PR itself. */
	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	status = dev->t10_pr.pr_aptpl_active ? "Activated" : "Disabled";
	return sprintf(page, "APTPL Bit Status: %s\n", status);
}
/* Signal to userspace that the device can accept PR APTPL metadata writes. */
static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	/* Attribute is empty unless the core emulates SPC-3 PR itself. */
	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
/*
 * Token ids for the comma/newline-separated key=value options accepted by
 * target_pr_res_aptpl_metadata_store() below.
 */
enum {
Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
/* match_token() table mapping each option pattern to its token id. */
static match_table_t tokens = {
{Opt_initiator_fabric, "initiator_fabric=%s"},
{Opt_initiator_node, "initiator_node=%s"},
{Opt_initiator_sid, "initiator_sid=%s"},
{Opt_sa_res_key, "sa_res_key=%s"},
{Opt_res_holder, "res_holder=%d"},
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
{Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
{Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
/*
 * Parse user-supplied APTPL (Activate Persistence Through Power Loss)
 * metadata and recreate a persistent reservation registration from it.
 * Input is a comma/newline-separated key=value list (see the tokens table
 * above); i_port, t_port and sa_res_key are mandatory.
 *
 * Fix: the over-length checks for initiator_node=, initiator_sid= and
 * target_node= used to set ret = -EINVAL and "break", which only left the
 * switch -- parsing continued and ret was later overwritten, so oversized
 * values were silently accepted.  They now bail out via "goto out".
 */
static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = pr_to_dev(item);
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u64 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	/* APTPL only applies when the core emulates SPC-3 PR itself. */
	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return count;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return count;
	/* Registrations can only be replayed before any fabric export. */
	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return -EINVAL;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				goto out;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				goto out;
			}
			break;
		case Opt_sa_res_key:
			ret = match_u64(args, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			res_holder = arg;
			break;
		case Opt_res_type:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			type = (u8)arg;
			break;
		case Opt_res_scope:
			/* Scope is validated for syntax but otherwise unused. */
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_res_all_tg_pt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			mapped_lun = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				goto out;
			}
			break;
		case Opt_tpgt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			/* Relative target port id is parsed but unused here. */
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_target_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			target_lun = (u64)tmp_ll;
			break;
		default:
			break;
		}
	}

	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(i_port);
	kfree(isid);
	kfree(t_fabric);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}
/* configfs attribute definitions for the per-device "pr" group. */
CONFIGFS_ATTR_RO(target_pr_, res_holder);
CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
CONFIGFS_ATTR_RO(target_pr_, res_type);
CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
/* res_aptpl_metadata is the only writable attribute in this group. */
CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
/* NULL-terminated attribute table wired into tb_dev_pr_cit below. */
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
&target_pr_attr_res_holder,
&target_pr_attr_res_pr_all_tgt_pts,
&target_pr_attr_res_pr_generation,
&target_pr_attr_res_pr_holder_tg_port,
&target_pr_attr_res_pr_registered_i_pts,
&target_pr_attr_res_pr_type,
&target_pr_attr_res_type,
&target_pr_attr_res_aptpl_active,
&target_pr_attr_res_aptpl_metadata,
NULL,
};
/* Instantiate the config_item_type for the dev_pr group (no item/group ops). */
TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
/* End functions for struct config_item_type tb_dev_pr_cit */
/* Start functions for struct config_item_type tb_dev_cit */
/* Translate a dev_group config_item back to its owning se_device. */
static inline struct se_device *to_device(struct config_item *item)
{
	struct config_group *cg = to_config_group(item);

	return container_of(cg, struct se_device, dev_group);
}
/*
 * Dump core device state followed by the backend's own configfs
 * parameters into the output page.
 */
static ssize_t target_dev_info_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	ssize_t read_bytes;
	int bl = 0;

	transport_dump_dev_state(dev, page, &bl);
	read_bytes = bl;
	read_bytes += dev->transport->show_configfs_dev_params(dev,
			page + read_bytes);
	return read_bytes;
}
/*
 * Pass a "control" write straight through to the backend driver's
 * set_configfs_dev_params() hook; the backend owns parsing and the
 * returned byte count / error code.
 */
static ssize_t target_dev_control_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
return dev->transport->set_configfs_dev_params(dev, page, count);
}
/* Show the device alias, or nothing if no alias has been set. */
static ssize_t target_dev_alias_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (dev->dev_flags & DF_USING_ALIAS)
		return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
	return 0;
}
/*
 * Set the device alias string.  Input is bounded by SE_DEV_ALIAS_LEN and a
 * trailing newline from the shell write is stripped before the alias flag
 * is raised.
 */
static ssize_t target_dev_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t len;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	len = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!len)
		return -EINVAL;
	if (dev->dev_alias[len - 1] == '\n')
		dev->dev_alias[len - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return len;
}
/* Show the stored udev path, or nothing if none has been set. */
static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (dev->dev_flags & DF_USING_UDEV_PATH)
		return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
	return 0;
}
/*
 * Record the udev path for this device.  Input is bounded by
 * SE_UDEV_PATH_LEN and any trailing newline is stripped before the
 * udev-path flag is raised.
 */
static ssize_t target_dev_udev_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t len;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	len = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!len)
		return -EINVAL;
	if (dev->udev_path[len - 1] == '\n')
		dev->udev_path[len - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return len;
}
/* Show 1 when the backend device has been configured, 0 otherwise. */
static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}
/*
 * Enable (configure) the backend device.  Only input containing the
 * character '1' is accepted; everything else is rejected.
 */
static ssize_t target_dev_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	int ret;

	/* strchr() matches the original strstr(page, "1") semantics. */
	if (!strchr(page, '1')) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	return ret ? ret : count;
}
/* Show the ALUA LU group alias and id this device currently belongs to. */
static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lu_gp_member *lu_gp_mem = dev->dev_alua_lu_gp_mem;
	struct t10_alua_lu_gp *lu_gp;
	ssize_t len = 0;

	if (!lu_gp_mem)
		return 0;

	/* The lu_gp association is only stable under lu_gp_mem_lock. */
	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp)
		len = sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
			config_item_name(&lu_gp->lu_gp_group.cg_item),
			lu_gp->lu_gp_id);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	return len;
}
/*
 * Move this device into a named ALUA LU group, or detach it when the
 * special alias "NULL" is written.  The new group's reference is taken
 * before lu_gp_mem_lock so the lock is never held across the name lookup,
 * and any previous association is dropped before the new one is attached.
 */
static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
unsigned char buf[LU_GROUP_NAME_BUF];
int move = 0;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
return count;
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
memset(buf, 0, LU_GROUP_NAME_BUF);
memcpy(buf, page, count);
/*
 * Any ALUA logical unit alias besides "NULL" means we will be
 * making a new group association.
 */
if (strcmp(strstrip(buf), "NULL")) {
/*
 * core_alua_get_lu_gp_by_name() will increment reference to
 * struct t10_alua_lu_gp. This reference is released with
 * core_alua_put_lu_gp_from_name() below.
 */
lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
if (!lu_gp_new)
return -ENODEV;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if (lu_gp) {
/*
 * Clearing an existing lu_gp association, and replacing
 * with NULL
 */
if (!lu_gp_new) {
pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp->lu_gp_group.cg_item),
lu_gp->lu_gp_id);
__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
return count;
}
/*
 * Removing existing association of lu_gp_mem with lu_gp
 */
__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
move = 1;
}
/*
 * Associate lu_gp_mem with lu_gp_new.
 */
__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp_new->lu_gp_group.cg_item),
lu_gp_new->lu_gp_id);
/* Drop the lookup reference taken by core_alua_get_lu_gp_by_name(). */
core_alua_put_lu_gp_from_name(lu_gp_new);
return count;
}
static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
struct t10_alua_lba_map *map;
struct t10_alua_lba_map_member *mem;
char *b = page;
int bl = 0;
char state;
spin_lock(&dev->t10_alua.lba_map_lock);
if (!list_empty(&dev->t10_alua.lba_map_list))
bl += sprintf(b + bl, "%u %u\n",
dev->t10_alua.lba_map_segment_size,
dev->t10_alua.lba_map_segment_multiplier);
list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
bl += sprintf(b + bl, "%llu %llu",
map->lba_map_first_lba, map->lba_map_last_lba);
list_for_each_entry(mem, &map->lba_map_mem_list,
lba_map_mem_list) {
switch (mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
state = 'O';
break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
state = 'A';
break;
case ALUA_ACCESS_STATE_STANDBY:
state = 'S';
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
state = 'U';
break;
default:
state = '.';
break;
}
bl += sprintf(b + bl, " %d:%c",
mem->lba_map_mem_alua_pg_id, state);
}
bl += sprintf(b + bl, "\n");
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return bl;
}
/*
 * Parse and install a new referral LBA map.  Line 0 carries the segment
 * size and multiplier; each following line is "<first_lba> <last_lba>"
 * followed by space-separated "<pg_id>:<state_char>" descriptors.  All
 * lines must list the same number of port groups.  On any parse error the
 * partially built list is freed and the error is returned; on success the
 * new map replaces the device's current one.
 */
static ssize_t target_dev_lba_map_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
struct t10_alua_lba_map *lba_map = NULL;
struct list_head lba_list;
char *map_entries, *orig, *ptr;
char state;
int pg_num = -1, pg;
int ret = 0, num = 0, pg_id, alua_state;
unsigned long start_lba = -1, end_lba = -1;
unsigned long segment_size = -1, segment_mult = -1;
orig = map_entries = kstrdup(page, GFP_KERNEL);
if (!map_entries)
return -ENOMEM;
INIT_LIST_HEAD(&lba_list);
while ((ptr = strsep(&map_entries, "\n")) != NULL) {
if (!*ptr)
continue;
/* First non-empty line: segment geometry. */
if (num == 0) {
if (sscanf(ptr, "%lu %lu\n",
&segment_size, &segment_mult) != 2) {
pr_err("Invalid line %d\n", num);
ret = -EINVAL;
break;
}
num++;
continue;
}
if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
pr_err("Invalid line %d\n", num);
ret = -EINVAL;
break;
}
/* Advance past the two LBA fields to the state descriptors. */
ptr = strchr(ptr, ' ');
if (!ptr) {
pr_err("Invalid line %d, missing end lba\n", num);
ret = -EINVAL;
break;
}
ptr++;
ptr = strchr(ptr, ' ');
if (!ptr) {
pr_err("Invalid line %d, missing state definitions\n",
num);
ret = -EINVAL;
break;
}
ptr++;
lba_map = core_alua_allocate_lba_map(&lba_list,
start_lba, end_lba);
if (IS_ERR(lba_map)) {
ret = PTR_ERR(lba_map);
break;
}
pg = 0;
while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
switch (state) {
case 'O':
alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
break;
case 'A':
alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
break;
case 'S':
alua_state = ALUA_ACCESS_STATE_STANDBY;
break;
case 'U':
alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
break;
default:
pr_err("Invalid ALUA state '%c'\n", state);
ret = -EINVAL;
goto out;
}
ret = core_alua_allocate_lba_map_mem(lba_map,
pg_id, alua_state);
if (ret) {
pr_err("Invalid target descriptor %d:%c "
"at line %d\n",
pg_id, state, num);
break;
}
pg++;
ptr = strchr(ptr, ' ');
if (ptr)
ptr++;
else
break;
}
/* Every segment line must describe the same number of groups. */
if (pg_num == -1)
pg_num = pg;
else if (pg != pg_num) {
pr_err("Only %d from %d port groups definitions "
"at line %d\n", pg, pg_num, num);
ret = -EINVAL;
break;
}
num++;
}
out:
if (ret) {
core_alua_free_lba_map(&lba_list);
count = ret;
} else
core_alua_set_lba_map(dev, &lba_list,
segment_size, segment_mult);
kfree(orig);
return count;
}
/* configfs attribute definitions for the per-device top-level group. */
CONFIGFS_ATTR_RO(target_dev_, info);
CONFIGFS_ATTR_WO(target_dev_, control);
CONFIGFS_ATTR(target_dev_, alias);
CONFIGFS_ATTR(target_dev_, udev_path);
CONFIGFS_ATTR(target_dev_, enable);
CONFIGFS_ATTR(target_dev_, alua_lu_gp);
CONFIGFS_ATTR(target_dev_, lba_map);
/* NULL-terminated attribute table wired into tb_dev_cit below. */
static struct configfs_attribute *target_core_dev_attrs[] = {
&target_dev_attr_info,
&target_dev_attr_control,
&target_dev_attr_alias,
&target_dev_attr_udev_path,
&target_dev_attr_enable,
&target_dev_attr_alua_lu_gp,
&target_dev_attr_lba_map,
NULL,
};
/* configfs release hook: free the se_device once its last reference drops. */
static void target_core_dev_release(struct config_item *item)
{
	struct se_device *dev = container_of(to_config_group(item),
			struct se_device, dev_group);

	target_free_device(dev);
}
/*
* Used in target_core_fabric_configfs.c to verify valid se_device symlink
* within target_fabric_port_link()
*/
/* Non-static: referenced from target_core_fabric_configfs.c (see above). */
struct configfs_item_operations target_core_dev_item_ops = {
.release = target_core_dev_release,
};
/* Instantiate the config_item_type for the per-device group. */
TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
/* End functions for struct config_item_type tb_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Translate a lu_gp_group config_item back to its t10_alua_lu_gp. */
static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
{
	struct config_group *cg = to_config_group(item);

	return container_of(cg, struct t10_alua_lu_gp, lu_gp_group);
}
/* Show the LU group id, or nothing until a valid id has been assigned. */
static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);

	if (lu_gp->lu_gp_valid_id)
		return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
	return 0;
}
/*
 * Assign a 16-bit ALUA logical unit group id.  The value is range-checked
 * here and installed via core_alua_set_lu_gp_id().
 */
static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (val > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", val);
		return -EINVAL;
	}

	ret = core_alua_set_lu_gp_id(lu_gp, (u16)val);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&lu_gp->lu_gp_group.cg_item),
		lu_gp->lu_gp_id);

	return count;
}
/*
 * List every device that is a member of this LU group, one "hba/dev" path
 * per line.  Each entry is formatted into a bounce buffer so PAGE_SIZE can
 * be respected; the walk stops (with a warning) once the page would
 * overflow.
 */
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
struct se_device *dev;
struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[LU_GROUP_NAME_BUF];
memset(buf, 0, LU_GROUP_NAME_BUF);
/* lu_gp_lock protects the member list walk below. */
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
hba = dev->se_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
memcpy(page+len, buf, cur_len);
len += cur_len;
}
spin_unlock(&lu_gp->lu_gp_lock);
return len;
}
/* configfs attribute definitions for an individual ALUA LU group. */
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
CONFIGFS_ATTR_RO(target_lu_gp_, members);
/* NULL-terminated attribute table for target_core_alua_lu_gp_cit. */
static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
&target_lu_gp_attr_lu_gp_id,
&target_lu_gp_attr_members,
NULL,
};
/* configfs release hook: free the LU group when its last reference drops. */
static void target_core_alua_lu_gp_release(struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	core_alua_free_lu_gp(lu_gp);
}
/* Item ops: only release is needed; teardown happens on final put. */
static struct configfs_item_operations target_core_alua_lu_gp_ops = {
.release = target_core_alua_lu_gp_release,
};
/* config_item_type for a single core/alua/lu_gps/$NAME directory. */
static const struct config_item_type target_core_alua_lu_gp_cit = {
.ct_item_ops = &target_core_alua_lu_gp_ops,
.ct_attrs = target_core_alua_lu_gp_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
/*
 * configfs mkdir handler for core/alua/lu_gps/$NAME: allocate a new ALUA
 * logical unit group and hand its config_group back to configfs.
 */
static struct config_group *target_core_alua_create_lu_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return NULL;

	config_group_init_type_name(&lu_gp->lu_gp_group, name,
			&target_core_alua_lu_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s\n",
		config_item_name(&lu_gp->lu_gp_group.cg_item));

	return &lu_gp->lu_gp_group;
}
/*
 * configfs rmdir handler for core/alua/lu_gps/$NAME.  Only drops the item
 * reference; the actual free is deferred to the release callback.
 */
static void target_core_alua_drop_lu_gp(
struct config_group *group,
struct config_item *item)
{
struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
struct t10_alua_lu_gp, lu_gp_group);
pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
/*
 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
 * -> target_core_alua_lu_gp_release()
 */
config_item_put(item);
}
/* mkdir/rmdir handlers for the core/alua/lu_gps directory. */
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
.make_group = &target_core_alua_create_lu_gp,
.drop_item = &target_core_alua_drop_lu_gp,
};
/* config_item_type for the lu_gps container (group ops only, no attrs). */
static const struct config_item_type target_core_alua_lu_gps_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_alua_lu_gps_group_ops,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
/* Translate a tg_pt_gp_group config_item back to its t10_alua_tg_pt_gp. */
static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
{
	struct config_group *cg = to_config_group(item);

	return container_of(cg, struct t10_alua_tg_pt_gp, tg_pt_gp_group);
}
static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
char *page)
{
return sprintf(page, "%d\n",
to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
/*
 * Trigger an implicit ALUA access-state transition for this target port
 * group.  Requires a valid tg_pt_gp id, a configured device, and implicit
 * ALUA to be enabled; LBA_DEPENDENT is additionally rejected while
 * explicit ALUA management is active.  The transition itself is performed
 * by core_alua_do_port_transition().
 */
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
unsigned long tmp;
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
pr_err("Unable to do implicit ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
if (!target_dev_configured(dev)) {
pr_err("Unable to set alua_access_state while device is"
" not configured\n");
return -ENODEV;
}
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
return ret;
}
new_state = (int)tmp;
if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
pr_err("Unable to process implicit configfs ALUA"
" transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}
if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
/* LBA DEPENDENT is only allowed with implicit ALUA */
pr_err("Unable to process implicit configfs ALUA transition"
" while explicit ALUA management is enabled\n");
return -EINVAL;
}
ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);
return (!ret) ? count : -EINVAL;
}
/* Show the human-readable ALUA access status of this target port group. */
static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	const char *status;

	status = core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status);
	return sprintf(page, "%s\n", status);
}
/*
 * Set the ALUA access status reported for this target port group.  Only
 * the three status values defined by SPC are accepted.
 */
static ssize_t target_tg_pt_gp_alua_access_status_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int new_status, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do set ALUA access status on non"
			" valid tg_pt_gp ID: %hu\n",
			tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access status"
			" from %s\n", page);
		return ret;
	}
	new_status = (int)tmp;

	switch (new_status) {
	case ALUA_STATUS_NONE:
	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
		break;
	default:
		pr_err("Illegal ALUA access status: 0x%02x\n",
			new_status);
		return -EINVAL;
	}

	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
	return count;
}
/* Delegate access-type display to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
char *page)
{
return core_alua_show_access_type(to_tg_pt_gp(item), page);
}
/* Delegate access-type parsing/update to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}
/*
 * Generate show/store handlers for one "alua_support_<state>" attribute.
 * show prints 1/0 depending on whether _bit is set in the group's
 * supported-states mask; store accepts only "0" or "1" (and only once the
 * group has a valid id) and sets or clears _bit accordingly.
 */
#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \
static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \
struct config_item *item, char *p) \
{ \
struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
return sprintf(p, "%d\n", \
!!(t->tg_pt_gp_alua_supported_states & _bit)); \
} \
\
static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \
struct config_item *item, const char *p, size_t c) \
{ \
struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
unsigned long tmp; \
int ret; \
\
if (!t->tg_pt_gp_valid_id) { \
pr_err("Unable to do set " #_name " ALUA state on non" \
" valid tg_pt_gp ID: %hu\n", \
t->tg_pt_gp_valid_id); \
return -EINVAL; \
} \
\
ret = kstrtoul(p, 0, &tmp); \
if (ret < 0) { \
pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
return -EINVAL; \
} \
if (tmp > 1) { \
pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
return -EINVAL; \
} \
if (tmp) \
t->tg_pt_gp_alua_supported_states |= _bit; \
else \
t->tg_pt_gp_alua_supported_states &= ~_bit; \
\
return c; \
}
/* One show/store pair per ALUA supported-state bit. */
ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
static ssize_t target_tg_pt_gp_alua_write_metadata_show(
struct config_item *item, char *page)
{
return sprintf(page, "%d\n",
to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}
/* Enable/disable persisting ALUA state to metadata; accepts only 0 or 1. */
static ssize_t target_tg_pt_gp_alua_write_metadata_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("Unable to extract alua_write_metadata\n");
		return ret;
	}
	/* val is unsigned, so anything above 1 is out of range. */
	if (val > 1) {
		pr_err("Illegal value for alua_write_metadata:"
			" %lu\n", val);
		return -EINVAL;
	}

	tg_pt_gp->tg_pt_gp_write_metadata = (int)val;
	return count;
}
/* Delegate non-optimized delay display to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
char *page)
{
return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}
/* Delegate non-optimized delay update to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
count);
}
/* Delegate transition delay display to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
char *page)
{
return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}
/* Delegate transition delay update to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
count);
}
/* Delegate implicit transition timeout display to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
struct config_item *item, char *page)
{
return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}
/* Delegate implicit transition timeout update to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
struct config_item *item, const char *page, size_t count)
{
return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
count);
}
/* Delegate PREF bit display to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
char *page)
{
return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}
/* Delegate PREF bit update to the shared ALUA helper. */
static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}
/* Show the target port group id, or nothing until a valid id is assigned. */
static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	if (tg_pt_gp->tg_pt_gp_valid_id)
		return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
	return 0;
}
/*
 * Assign a 16-bit ALUA target port group id.  The value is range-checked
 * here and installed via core_alua_set_tg_pt_gp_id().
 */
static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
		       page);
		return ret;
	}
	if (val > 0x0000ffff) {
		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
		       val);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)val);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
		"core/alua/tg_pt_gps/%s to ID: %hu\n",
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id);

	return count;
}
/*
 * List every LUN linked into this ALUA target port group, one
 * "fabric/wwn/tpgt_N/lun" path per line.  Entries are formatted into a
 * bounce buffer first so PAGE_SIZE can be respected; the walk stops (with
 * a warning) once the page would overflow.
 *
 * Fix: the overflow warning previously said "lu_gp_show_attr_members" --
 * a copy-paste from the LU group handler above; it now names this
 * attribute correctly.
 */
static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_lun *lun;
	ssize_t len = 0, cur_len;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];

	memset(buf, 0, TG_PT_GROUP_NAME_BUF);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
			lun_tg_pt_gp_link) {
		struct se_portal_group *tpg = lun->lun_tpg;

		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
			"/%s\n", tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of tg_pt_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	return len;
}
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
/* NULL-terminated attribute table for a tg_pt_gp configfs directory */
static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_tg_pt_gp_attr_alua_access_state,
	&target_tg_pt_gp_attr_alua_access_status,
	&target_tg_pt_gp_attr_alua_access_type,
	&target_tg_pt_gp_attr_alua_support_transitioning,
	&target_tg_pt_gp_attr_alua_support_offline,
	&target_tg_pt_gp_attr_alua_support_lba_dependent,
	&target_tg_pt_gp_attr_alua_support_unavailable,
	&target_tg_pt_gp_attr_alua_support_standby,
	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
	&target_tg_pt_gp_attr_alua_support_active_optimized,
	&target_tg_pt_gp_attr_alua_write_metadata,
	&target_tg_pt_gp_attr_nonop_delay_msecs,
	&target_tg_pt_gp_attr_trans_delay_msecs,
	&target_tg_pt_gp_attr_implicit_trans_secs,
	&target_tg_pt_gp_attr_preferred,
	&target_tg_pt_gp_attr_tg_pt_gp_id,
	&target_tg_pt_gp_attr_members,
	NULL,
};
/*
 * configfs ->release() for a tg_pt_gp item: called once the last
 * reference on the config_item is dropped; frees the port group.
 */
static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
	/* Use the common to_tg_pt_gp() helper instead of open-coding
	 * container_of(), for consistency with the attribute handlers. */
	core_alua_free_tg_pt_gp(to_tg_pt_gp(item));
}
/* Item ops: only a release hook is needed for a tg_pt_gp directory */
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release		= target_core_alua_tg_pt_gp_release,
};

/* Item type for each core/$DEV/alua/tg_pt_gps/$NAME directory */
static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/*
 * mkdir(2) handler for core/$DEV/alua/tg_pt_gps/: allocate a new ALUA
 * target port group and initialize its configfs group.  Returns NULL
 * on allocation failure (configfs maps that to -ENOMEM).
 */
static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
					alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	return &tg_pt_gp->tg_pt_gp_group;
}
/*
 * rmdir(2) handler for core/$DEV/alua/tg_pt_gps/$NAME: drop the configfs
 * reference; the actual free happens from the item's ->release() hook.
 */
static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}
/* Group ops: mkdir/rmdir of tg_pt_gp entries under a device's alua/ dir */
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

/* Generate tb->tb_dev_alua_tg_pt_gps_cit via the TB_CIT_SETUP helper */
TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. There are default groups
 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
 * target_core_alua_cit in target_core_init_configfs() below.
 */
static const struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_cit */
/* Start functions for struct config_item_type tb_dev_stat_cit */

/*
 * mkdir(2) under a device's statistics/ directory is not supported;
 * the statistics sub-groups are created by the core itself.
 */
static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}
/*
 * rmdir(2) under a device's statistics/ directory is a no-op; the
 * groups are torn down by target_core_drop_subdev() instead.
 */
static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
	/* Nothing to do: redundant bare "return;" removed. */
}
/* Group ops rejecting user mkdir and ignoring rmdir under statistics/ */
static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group		= &target_core_stat_mkdir,
	.drop_item		= &target_core_stat_rmdir,
};

/* Generate tb->tb_dev_stat_cit via the TB_CIT_SETUP helper */
TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */

/*
 * mkdir(2) handler for /sys/kernel/config/target/core/$HBA/: allocate a
 * new struct se_device and wire up its default configfs sub-groups
 * (action, attrib, pr, wwn, alua, statistics) plus the ALUA
 * default_tg_pt_gp.  Runs under hba_access_mutex so device creation on
 * one HBA is serialized against hba_mode changes and other mkdirs.
 */
static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	int errno = -ENOMEM, ret;

	/* Interruptible so a signal can abort a stuck mkdir */
	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);

	config_group_init_type_name(&dev->dev_action_group, "action",
			&tb->tb_dev_action_cit);
	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);

	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);
	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
			&dev->dev_group);

	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_device;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
			&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return &dev->dev_group;

/* Error unwind: free the partially initialized device, then unlock */
out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
/*
 * rmdir(2) handler for /sys/kernel/config/target/core/$HBA/$DEV/: tear
 * down the default sub-groups in reverse order of creation, then drop
 * the final config_item reference (which releases the se_device).
 */
static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	configfs_remove_default_groups(dev_cg);

	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}
/* Group ops: device mkdir/rmdir under a core/$HBA/ directory */
static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};
/* Map a core/$HBA config_item back to its owning struct se_hba */
static inline struct se_hba *to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
struct se_hba *hba = to_hba(item);
return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
hba->hba_id, hba->backend->ops->name,
TARGET_CORE_VERSION);
}
/*
 * Show "1" when the HBA runs in passthrough (pSCSI) mode, else "0".
 */
static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);

	return sprintf(page, "%d\n",
		       (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) ? 1 : 0);
}
/*
 * Store handler for the hba_mode attribute: toggle passthrough (pSCSI)
 * mode via the backend's pmode_enable_hba() callback.  Rejected when
 * the backend has no such callback or when devices are still attached.
 */
static ssize_t target_hba_mode_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_hba *hba = to_hba(item);
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	/* Mode cannot change while devices exist on this HBA */
	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	/* Callback returns >0 to enable, 0 to disable pSCSI mode */
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}
/* Declare the hba_info (read-only) and hba_mode (rw) attributes */
CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);
/*
 * configfs ->release() for a core/$HBA item: delete the HBA once the
 * last reference on its config_item is dropped.
 */
static void target_core_hba_release(struct config_item *item)
{
	/* Use the common to_hba() helper instead of open-coding
	 * container_of(), for consistency with the attribute handlers. */
	core_delete_hba(to_hba(item));
}
/* NULL-terminated attribute table for a core/$HBA directory */
static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_attr_hba_info,
	&target_attr_hba_mode,
	NULL,
};

/* Item ops: only a release hook is needed for an HBA directory */
static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
};

/* Item type for each /sys/kernel/config/target/core/$HBA directory */
static const struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};
/*
 * mkdir(2) handler for /sys/kernel/config/target/core/: parse the
 * directory name as $SUBSYSTEM_PLUGIN_$HOST_ID (e.g. "fileio_0" or
 * "rd_mcp_1"), load the backend plugin if needed, and allocate the HBA.
 *
 * Returns the new HBA's config_group, or an ERR_PTR on bad input or
 * allocation failure.
 */
static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN];
	unsigned long plugin_dep_id = 0;
	int ret;

	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	/* Use strchr() for the single-character separator (was strstr) */
	str = strchr(buf, '_');
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names.
	 * Namely rd_direct and rd_mcp..
	 */
	str2 = strchr(str+1, '_');
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
				" plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
/*
 * rmdir(2) handler for core/$HBA: just drop the configfs reference; the
 * HBA itself is deleted from the item's ->release() hook.
 */
static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}
/* Group ops: HBA mkdir/rmdir under /sys/kernel/config/target/core/ */
static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

/* Item type for the top-level "core" group itself */
static const struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};

/* Stop functions for struct config_item_type target_core_hba_cit */
/*
 * Initialize all per-backend config_item_types in *tb.  Called once per
 * registered backend so each gets its own cit instances (the attribute
 * tables can differ per backend).
 */
void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_action_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}
/*
 * Initialize db_root: if the preferred database directory exists and is
 * a directory, switch db_root to it; otherwise keep the current value.
 * Best-effort: failures only log an error.
 */
static void target_init_dbroot(void)
{
	struct file *fp;

	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
		return;
	}
	filp_close(fp, NULL);

	/* Source is NUL-terminated by the snprintf() above, so strncpy
	 * cannot leave db_root unterminated here. */
	strncpy(db_root, db_root_stage, DB_ROOT_LEN);
	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
/*
 * Module init: build the /sys/kernel/config/target/core hierarchy
 * (core -> alua -> lu_gps -> default_lu_gp), register the configfs
 * subsystem, then bring up the RAMDISK backend, virtual LUN 0, the
 * xcopy passthrough session, and db_root.  Unwinds in reverse on error.
 */
static int __init target_core_init_configfs(void)
{
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	config_group_init_type_name(&target_core_hbagroup, "core",
			&target_core_cit);
	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);

	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
	configfs_add_default_group(&alua_group, &target_core_hbagroup);

	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
			&target_core_alua_lu_gps_cit);
	configfs_add_default_group(&alua_lu_gps_group, &alua_group);

	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}
	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
			&target_core_alua_lu_gp_cit);
	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);

	default_lu_gp = lu_gp;

	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	/* db_root setup is best-effort and never fails module init */
	target_init_dbroot();

	return 0;

/* Unwind: "out" tears down post-registration state, "out_global" the rest */
out:
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	release_se_kmem_caches();
	return ret;
}
/*
 * Module exit: tear down the configfs hierarchy and the resources set
 * up by target_core_init_configfs(), in reverse order of creation.
 */
static void __exit target_core_exit_configfs(void)
{
	/* Detach the static default groups before unregistering */
	configfs_remove_default_groups(&alua_lu_gps_group);
	configfs_remove_default_groups(&alua_group);
	configfs_remove_default_groups(&target_core_hbagroup);

	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic..
	 */
	configfs_unregister_subsystem(&target_core_fabrics);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
			" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}
/* Standard module metadata and init/exit registration */
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);