Raghavendra Rao Ananta 5bd75403be Merge remote-tracking branch 'remotes/origin/tmp-f686d9f' into msm-lahaina
* remotes/origin/tmp-f686d9f:
  ANDROID: update abi_gki_aarch64.xml for 5.2-rc6
  Linux 5.2-rc6
  Revert "iommu/vt-d: Fix lock inversion between iommu->lock and device_domain_lock"
  Bluetooth: Fix regression with minimum encryption key size alignment
  tcp: refine memory limit test in tcp_fragment()
  x86/vdso: Prevent segfaults due to hoisted vclock reads
  SUNRPC: Fix a credential refcount leak
  Revert "SUNRPC: Declare RPC timers as TIMER_DEFERRABLE"
  net :sunrpc :clnt :Fix xps refcount imbalance on the error path
  NFS4: Only set creation opendata if O_CREAT
  ANDROID: gki_defconfig: workaround to enable configs
  ANDROID: gki_defconfig: more configs for partners
  ARM: 8867/1: vdso: pass --be8 to linker if necessary
  KVM: nVMX: reorganize initial steps of vmx_set_nested_state
  KVM: PPC: Book3S HV: Invalidate ERAT when flushing guest TLB entries
  habanalabs: use u64_to_user_ptr() for reading user pointers
  nfsd: replace Jeff by Chuck as nfsd co-maintainer
  inet: clear num_timeout reqsk_alloc()
  PCI/P2PDMA: Ignore root complex whitelist when an IOMMU is present
  net: mvpp2: debugfs: Add pmap to fs dump
  ipv6: Default fib6_type to RTN_UNICAST when not set
  net: hns3: Fix inconsistent indenting
  net/af_iucv: always register net_device notifier
  net/af_iucv: build proper skbs for HiperTransport
  net/af_iucv: remove GFP_DMA restriction for HiperTransport
  doc: fix documentation about UIO_MEM_LOGICAL using
  MAINTAINERS / Documentation: Thorsten Scherer is the successor of Gavin Schenk
  docs: fb: Add TER16x32 to the available font names
  MAINTAINERS: fpga: hand off maintainership to Moritz
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 507
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 506
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 505
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 504
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 503
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 502
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 501
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 500
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 499
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 498
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 497
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 496
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 495
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 491
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 490
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 489
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 488
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 487
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 486
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 485
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 484
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 482
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 481
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 480
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 479
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 477
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 475
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 474
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 473
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 472
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 471
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 469
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 468
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 467
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 466
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 465
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 464
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 463
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 462
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 461
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 460
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 459
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 457
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 456
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 455
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 454
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 452
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 451
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 250
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 248
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 247
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 246
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 245
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 244
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 243
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 239
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 238
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 237
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 235
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 234
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 233
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 232
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 231
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 230
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 226
  KVM: arm/arm64: Fix emulated ptimer irq injection
  net: dsa: mv88e6xxx: fix shift of FID bits in mv88e6185_g1_vtu_loadpurge()
  tests: kvm: Check for a kernel warning
  kvm: tests: Sort tests in the Makefile alphabetically
  KVM: x86/mmu: Allocate PAE root array when using SVM's 32-bit NPT
  KVM: x86: Modify struct kvm_nested_state to have explicit fields for data
  fanotify: update connector fsid cache on add mark
  quota: fix a problem about transfer quota
  drm/i915: Don't clobber M/N values during fastset check
  powerpc: enable a 30-bit ZONE_DMA for 32-bit pmac
  ovl: make i_ino consistent with st_ino in more cases
  scsi: qla2xxx: Fix hardlockup in abort command during driver remove
  scsi: ufs: Avoid runtime suspend possibly being blocked forever
  scsi: qedi: update driver version to 8.37.0.20
  scsi: qedi: Check targetname while finding boot target information
  hvsock: fix epollout hang from race condition
  net/udp_gso: Allow TX timestamp with UDP GSO
  net: netem: fix use after free and double free with packet corruption
  net: netem: fix backlog accounting for corrupted GSO frames
  net: lio_core: fix potential sign-extension overflow on large shift
  tipc: pass tunnel dev as NULL to udp_tunnel(6)_xmit_skb
  ip6_tunnel: allow not to count pkts on tstats by passing dev as NULL
  ip_tunnel: allow not to count pkts on tstats by setting skb's dev to NULL
  apparmor: reset pos on failure to unpack for various functions
  apparmor: enforce nullbyte at end of tag string
  apparmor: fix PROFILE_MEDIATES for untrusted input
  RDMA/efa: Handle mmap insertions overflow
  tun: wake up waitqueues after IFF_UP is set
  drm: return -EFAULT if copy_to_user() fails
  net: remove duplicate fetch in sock_getsockopt
  tipc: fix issues with early FAILOVER_MSG from peer
  bnx2x: Check if transceiver implements DDM before access
  xhci: detect USB 3.2 capable host controllers correctly
  usb: xhci: Don't try to recover an endpoint if port is in error state.
  KVM: fix typo in documentation
  drm/panfrost: Make sure a BO is only unmapped when appropriate
  md: fix for divide error in status_resync
  soc: ixp4xx: npe: Fix an IS_ERR() vs NULL check in probe
  arm64/mm: don't initialize pgd_cache twice
  MAINTAINERS: Update my email address
  arm64/sve: <uapi/asm/ptrace.h> should not depend on <uapi/linux/prctl.h>
  ovl: fix typo in MODULE_PARM_DESC
  ovl: fix bogus -Wmaybe-unitialized warning
  ovl: don't fail with disconnected lower NFS
  mmc: core: Prevent processing SDIO IRQs when the card is suspended
  mmc: sdhci: sdhci-pci-o2micro: Correctly set bus width when tuning
  brcmfmac: sdio: Don't tune while the card is off
  mmc: core: Add sdio_retune_hold_now() and sdio_retune_release()
  brcmfmac: sdio: Disable auto-tuning around commands expected to fail
  mmc: core: API to temporarily disable retuning for SDIO CRC errors
  Revert "brcmfmac: disable command decode in sdio_aos"
  ARM: ixp4xx: include irqs.h where needed
  ARM: ixp4xx: mark ixp4xx_irq_setup as __init
  ARM: ixp4xx: don't select SERIAL_OF_PLATFORM
  firmware: trusted_foundations: add ARMv7 dependency
  usb: dwc2: Use generic PHY width in params setup
  RDMA/efa: Fix success return value in case of error
  IB/hfi1: Handle port down properly in pio
  IB/hfi1: Handle wakeup of orphaned QPs for pio
  IB/hfi1: Wakeup QPs orphaned on wait list after flush
  IB/hfi1: Use aborts to trigger RC throttling
  IB/hfi1: Create inline to get extended headers
  IB/hfi1: Silence txreq allocation warnings
  IB/hfi1: Avoid hardlockup with flushlist_lock
  KVM: PPC: Book3S HV: Only write DAWR[X] when handling h_set_dawr in real mode
  KVM: PPC: Book3S HV: Fix r3 corruption in h_set_dabr()
  fs/namespace: fix unprivileged mount propagation
  vfs: fsmount: add missing mntget()
  cifs: fix GlobalMid_Lock bug in cifs_reconnect
  SMB3: retry on STATUS_INSUFFICIENT_RESOURCES instead of failing write
  staging: erofs: add requirements field in superblock
  arm64: ssbd: explicitly depend on <linux/prctl.h>
  block: fix page leak when merging to same page
  block: return from __bio_try_merge_page if merging occured in the same page
  Btrfs: fix failure to persist compression property xattr deletion on fsync
  riscv: remove unused barrier defines
  usb: chipidea: udc: workaround for endpoint conflict issue
  MAINTAINERS: Change QCOM repo location
  mmc: mediatek: fix SDIO IRQ detection issue
  mmc: mediatek: fix SDIO IRQ interrupt handle flow
  mmc: core: complete HS400 before checking status
  riscv: mm: synchronize MMU after pte change
  MAINTAINERS: Update my email address to use @kernel.org
  ANDROID: update abi_gki_aarch64.xml for 5.2-rc5
  riscv: dts: add initial board data for the SiFive HiFive Unleashed
  riscv: dts: add initial support for the SiFive FU540-C000 SoC
  dt-bindings: riscv: convert cpu binding to json-schema
  dt-bindings: riscv: sifive: add YAML documentation for the SiFive FU540
  arch: riscv: add support for building DTB files from DT source data
  drm/i915/gvt: ignore unexpected pvinfo write
  lapb: fixed leak of control-blocks.
  tipc: purge deferredq list for each grp member in tipc_group_delete
  ax25: fix inconsistent lock state in ax25_destroy_timer
  neigh: fix use-after-free read in pneigh_get_next
  tcp: fix compile error if !CONFIG_SYSCTL
  hv_sock: Suppress bogus "may be used uninitialized" warnings
  be2net: Fix number of Rx queues used for flow hashing
  net: handle 802.1P vlan 0 packets properly
  Linux 5.2-rc5
  tcp: enforce tcp_min_snd_mss in tcp_mtu_probing()
  tcp: add tcp_min_snd_mss sysctl
  tcp: tcp_fragment() should apply sane memory limits
  tcp: limit payload size of sacked skbs
  Revert "net: phylink: set the autoneg state in phylink_phy_change"
  bpf: fix nested bpf tracepoints with per-cpu data
  bpf: Fix out of bounds memory access in bpf_sk_storage
  vsock/virtio: set SOCK_DONE on peer shutdown
  net: dsa: rtl8366: Fix up VLAN filtering
  net: phylink: set the autoneg state in phylink_phy_change
  powerpc/32: fix build failure on book3e with KVM
  powerpc/booke: fix fast syscall entry on SMP
  powerpc/32s: fix initial setup of segment registers on secondary CPU
  x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback
  net: add high_order_alloc_disable sysctl/static key
  tcp: add tcp_tx_skb_cache sysctl
  tcp: add tcp_rx_skb_cache sysctl
  sysctl: define proc_do_static_key()
  hv_netvsc: Set probe mode to sync
  net: sched: flower: don't call synchronize_rcu() on mask creation
  net: dsa: fix warning same module names
  sctp: Free cookie before we memdup a new one
  net: dsa: microchip: Don't try to read stats for unused ports
  qmi_wwan: extend permitted QMAP mux_id value range
  qmi_wwan: avoid RCU stalls on device disconnect when in QMAP mode
  qmi_wwan: add network device usage statistics for qmimux devices
  qmi_wwan: add support for QMAP padding in the RX path
  bpf, x64: fix stack layout of JITed bpf code
  Smack: Restore the smackfsdef mount option and add missing prefixes
  bpf, devmap: Add missing RCU read lock on flush
  bpf, devmap: Add missing bulk queue free
  bpf, devmap: Fix premature entry free on destroying map
  ftrace: Fix NULL pointer dereference in free_ftrace_func_mapper()
  module: Fix livepatch/ftrace module text permissions race
  tracing/uprobe: Fix obsolete comment on trace_uprobe_create()
  tracing/uprobe: Fix NULL pointer dereference in trace_uprobe_create()
  tracing: Make two symbols static
  tracing: avoid build warning with HAVE_NOP_MCOUNT
  tracing: Fix out-of-range read in trace_stack_print()
  gfs2: Fix rounding error in gfs2_iomap_page_prepare
  net: phylink: further mac_config documentation improvements
  nfc: Ensure presence of required attributes in the deactivate_target handler
  btrfs: start readahead also in seed devices
  x86/kasan: Fix boot with 5-level paging and KASAN
  cfg80211: report measurement start TSF correctly
  cfg80211: fix memory leak of wiphy device name
  cfg80211: util: fix bit count off by one
  mac80211: do not start any work during reconfigure flow
  cfg80211: use BIT_ULL in cfg80211_parse_mbssid_data()
  mac80211: only warn once on chanctx_conf being NULL
  mac80211: drop robust management frames from unknown TA
  gpu: ipu-v3: image-convert: Fix image downsize coefficients
  gpu: ipu-v3: image-convert: Fix input bytesperline for packed formats
  gpu: ipu-v3: image-convert: Fix input bytesperline width/height align
  thunderbolt: Implement CIO reset correctly for Titan Ridge
  ARM: davinci: da8xx: specify dma_coherent_mask for lcdc
  ARM: davinci: da850-evm: call regulator_has_full_constraints()
  timekeeping: Repair ktime_get_coarse*() granularity
  Revert "ALSA: hda/realtek - Improve the headset mic for Acer Aspire laptops"
  ANDROID: update abi_gki_aarch64.xml
  mm/devm_memremap_pages: fix final page put race
  PCI/P2PDMA: track pgmap references per resource, not globally
  lib/genalloc: introduce chunk owners
  PCI/P2PDMA: fix the gen_pool_add_virt() failure path
  mm/devm_memremap_pages: introduce devm_memunmap_pages
  drivers/base/devres: introduce devm_release_action()
  mm/vmscan.c: fix trying to reclaim unevictable LRU page
  coredump: fix race condition between collapse_huge_page() and core dumping
  mm/mlock.c: change count_mm_mlocked_page_nr return type
  mm: mmu_gather: remove __tlb_reset_range() for force flush
  fs/ocfs2: fix race in ocfs2_dentry_attach_lock()
  mm/vmscan.c: fix recent_rotated history
  mm/mlock.c: mlockall error for flag MCL_ONFAULT
  scripts/decode_stacktrace.sh: prefix addr2line with $CROSS_COMPILE
  mm/list_lru.c: fix memory leak in __memcg_init_list_lru_node
  mm: memcontrol: don't batch updates of local VM stats and events
  PCI: PM: Skip devices in D0 for suspend-to-idle
  ANDROID: Removed extraneous configs from gki
  powerpc/bpf: use unsigned division instruction for 64-bit operations
  bpf: fix div64 overflow tests to properly detect errors
  bpf: sync BPF_FIB_LOOKUP flag changes with BPF uapi
  bpf: simplify definition of BPF_FIB_LOOKUP related flags
  cifs: add spinlock for the openFileList to cifsInodeInfo
  cifs: fix panic in smb2_reconnect
  x86/fpu: Don't use current->mm to check for a kthread
  KVM: nVMX: use correct clean fields when copying from eVMCS
  vfio-ccw: Destroy kmem cache region on module exit
  block/ps3vram: Use %llu to format sector_t after LBDAF removal
  libata: Extend quirks for the ST1000LM024 drives with NOLPM quirk
  bcache: only set BCACHE_DEV_WB_RUNNING when cached device attached
  bcache: fix stack corruption by PRECEDING_KEY()
  arm64/sve: Fix missing SVE/FPSIMD endianness conversions
  blk-mq: remove WARN_ON(!q->elevator) from blk_mq_sched_free_requests
  blkio-controller.txt: Remove references to CFQ
  block/switching-sched.txt: Update to blk-mq schedulers
  null_blk: remove duplicate check for report zone
  blk-mq: no need to check return value of debugfs_create functions
  io_uring: fix memory leak of UNIX domain socket inode
  block: force select mq-deadline for zoned block devices
  binder: fix possible UAF when freeing buffer
  drm/amdgpu: return 0 by default in amdgpu_pm_load_smu_firmware
  drm/amdgpu: Fix bounds checking in amdgpu_ras_is_supported()
  ANDROID: x86 gki_defconfig: enable DMA_CMA
  ANDROID: Fixed x86 regression
  ANDROID: gki_defconfig: enable DMA_CMA
  Input: synaptics - enable SMBus on ThinkPad E480 and E580
  net: mvpp2: prs: Use the correct helpers when removing all VID filters
  net: mvpp2: prs: Fix parser range for VID filtering
  mlxsw: spectrum: Disallow prio-tagged packets when PVID is removed
  mlxsw: spectrum_buffers: Reduce pool size on Spectrum-2
  selftests: tc_flower: Add TOS matching test
  mlxsw: spectrum_flower: Fix TOS matching
  selftests: mlxsw: Test nexthop offload indication
  mlxsw: spectrum_router: Refresh nexthop neighbour when it becomes dead
  mlxsw: spectrum: Use different seeds for ECMP and LAG hash
  net: tls, correctly account for copied bytes with multiple sk_msgs
  vrf: Increment Icmp6InMsgs on the original netdev
  cpuset: restore sanity to cpuset_cpus_allowed_fallback()
  net: ethtool: Allow matching on vlan DEI bit
  linux-next: DOC: RDS: Fix a typo in rds.txt
  x86/kgdb: Return 0 from kgdb_arch_set_breakpoint()
  mpls: fix af_mpls dependencies for real
  selinux: fix a missing-check bug in selinux_sb_eat_lsm_opts()
  selinux: fix a missing-check bug in selinux_add_mnt_opt( )
  arm64: tlbflush: Ensure start/end of address range are aligned to stride
  usb: typec: Make sure an alt mode exist before getting its partner
  KVM: arm/arm64: vgic: Fix kvm_device leak in vgic_its_destroy
  KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST
  KVM: arm64: Implement vq_present() as a macro
  xdp: check device pointer before clearing
  bpf: net: Set sk_bpf_storage back to NULL for cloned sk
  Btrfs: fix race between block group removal and block group allocation
  clocksource/drivers/arm_arch_timer: Don't trace count reader functions
  i2c: pca-platform: Fix GPIO lookup code
  thunderbolt: Make sure device runtime resume completes before taking domain lock
  drm: add fallback override/firmware EDID modes workaround
  i2c: acorn: fix i2c warning
  arm64: Don't unconditionally add -Wno-psabi to KBUILD_CFLAGS
  drm/edid: abstract override/firmware EDID retrieval
  platform/mellanox: mlxreg-hotplug: Add devm_free_irq call to remove flow
  platform/x86: mlx-platform: Fix parent device in i2c-mux-reg device registration
  platform/x86: intel-vbtn: Report switch events when event wakes device
  platform/x86: asus-wmi: Only Tell EC the OS will handle display hotkeys from asus_nb_wmi
  ARM: mvebu_v7_defconfig: fix Ethernet on Clearfog
  x86/resctrl: Prevent NULL pointer dereference when local MBM is disabled
  x86/resctrl: Don't stop walking closids when a locksetup group is found
  iommu/arm-smmu: Avoid constant zero in TLBI writes
  drm/i915/perf: fix whitelist on Gen10+
  drm/i915/sdvo: Implement proper HDMI audio support for SDVO
  drm/i915: Fix per-pixel alpha with CCS
  drm/i915/dmc: protect against reading random memory
  drm/i915/dsi: Use a fuzzy check for burst mode clock check
  Input: imx_keypad - make sure keyboard can always wake up system
  selinux: log raw contexts as untrusted strings
  ptrace: restore smp_rmb() in __ptrace_may_access()
  IB/hfi1: Correct tid qp rcd to match verbs context
  IB/hfi1: Close PSM sdma_progress sleep window
  IB/hfi1: Validate fault injection opcode user input
  geneve: Don't assume linear buffers in error handler
  vxlan: Don't assume linear buffers in error handler
  net: openvswitch: do not free vport if register_netdevice() is failed.
  net: correct udp zerocopy refcnt also when zerocopy only on append
  drm/amdgpu/{uvd,vcn}: fetch ring's read_ptr after alloc
  ovl: fix wrong flags check in FS_IOC_FS[SG]ETXATTR ioctls
  riscv: Fix udelay in RV32.
  drm/vmwgfx: fix a warning due to missing dma_parms
  riscv: export pm_power_off again
  drm/vmwgfx: Honor the sg list segment size limitation
  RISC-V: defconfig: enable clocks, serial console
  drm/vmwgfx: Use the backdoor port if the HB port is not available
  bpf: lpm_trie: check left child of last leftmost node for NULL
  Revert "fuse: require /dev/fuse reads to have enough buffer capacity"
  ALSA: ice1712: Check correct return value to snd_i2c_sendbytes (EWS/DMX 6Fire)
  ALSA: oxfw: allow PCM capture for Stanton SCS.1m
  ALSA: firewire-motu: fix destruction of data for isochronous resources
  s390/ctl_reg: mark __ctl_set_bit and __ctl_clear_bit as __always_inline
  s390/boot: disable address-of-packed-member warning
  ANDROID: update gki aarch64 ABI representation
  cgroup: Fix css_task_iter_advance_css_set() cset skip condition
  drm/panfrost: Require the simple_ondemand governor
  drm/panfrost: make devfreq optional again
  drm/gem_shmem: Use a writecombine mapping for ->vaddr
  mmc: sdhi: disallow HS400 for M3-W ES1.2, RZ/G2M, and V3H
  ASoC: Intel: sst: fix kmalloc call with wrong flags
  ASoC: core: Fix deadlock in snd_soc_instantiate_card()
  cgroup/bfq: revert bfq.weight symlink change
  ARM: dts: am335x phytec boards: Fix cd-gpios active level
  ARM: dts: dra72x: Disable usb4_tm target module
  nfp: ensure skb network header is set for packet redirect
  tcp: fix undo spurious SYNACK in passive Fast Open
  mpls: fix af_mpls dependencies
  ibmvnic: Fix unchecked return codes of memory allocations
  ibmvnic: Refresh device multicast list after reset
  ibmvnic: Do not close unopened driver during reset
  mpls: fix warning with multi-label encap
  net: phy: rename Asix Electronics PHY driver
  ipv6: flowlabel: fl6_sock_lookup() must use atomic_inc_not_zero
  net: ipv4: fib_semantics: fix uninitialized variable
  Input: iqs5xx - get axis info before calling input_mt_init_slots()
  Linux 5.2-rc4
  drm: panel-orientation-quirks: Add quirk for GPD MicroPC
  drm: panel-orientation-quirks: Add quirk for GPD pocket2
  counter/ftm-quaddec: Add missing dependencies in Kconfig
  staging: iio: adt7316: Fix build errors when GPIOLIB is not set
  x86/fpu: Update kernel's FPU state before using for the fsave header
  MAINTAINERS: Karthikeyan Ramasubramanian is MIA
  i2c: xiic: Add max_read_len quirk
  ANDROID: update ABI representation
  gpio: pca953x: hack to fix 24 bit gpio expanders
  net/mlx5e: Support tagged tunnel over bond
  net/mlx5e: Avoid detaching non-existing netdev under switchdev mode
  net/mlx5e: Fix source port matching in fdb peer flow rule
  net/mlx5e: Replace reciprocal_scale in TX select queue function
  net/mlx5e: Add ndo_set_feature for uplink representor
  net/mlx5: Avoid reloading already removed devices
  net/mlx5: Update pci error handler entries and command translation
  RAS/CEC: Convert the timer callback to a workqueue
  RAS/CEC: Fix binary search function
  x86/mm/KASLR: Compute the size of the vmemmap section properly
  can: purge socket error queue on sock destruct
  can: flexcan: Remove unneeded registration message
  can: af_can: Fix error path of can_init()
  can: m_can: implement errata "Needless activation of MRAF irq"
  can: mcp251x: add support for mcp25625
  dt-bindings: can: mcp251x: add mcp25625 support
  can: xilinx_can: use correct bittiming_const for CAN FD core
  can: flexcan: fix timeout when set small bitrate
  can: usb: Kconfig: Remove duplicate menu entry
  lockref: Limit number of cmpxchg loop retries
  uaccess: add noop untagged_addr definition
  x86/insn-eval: Fix use-after-free access to LDT entry
  kbuild: use more portable 'command -v' for cc-cross-prefix
  s390/unwind: correct stack switching during unwind
  scsi: hpsa: correct ioaccel2 chaining
  btrfs: Always trim all unallocated space in btrfs_trim_free_extents
  netfilter: ipv6: nf_defrag: accept duplicate fragments again
  powerpc/32s: fix booting with CONFIG_PPC_EARLY_DEBUG_BOOTX
  drm/meson: fix G12A primary plane disabling
  drm/meson: fix primary plane disabling
  drm/meson: fix G12A HDMI PLL settings for 4K60 1000/1001 variations
  block, bfq: add weight symlink to the bfq.weight cgroup parameter
  cgroup: let a symlink too be created with a cftype file
  powerpc/64s: __find_linux_pte() synchronization vs pmdp_invalidate()
  powerpc/64s: Fix THP PMD collapse serialisation
  powerpc: Fix kexec failure on book3s/32
  drm/nouveau/secboot/gp10[2467]: support newer FW to fix SEC2 failures on some boards
  drm/nouveau/secboot: enable loading of versioned LS PMU/SEC2 ACR msgqueue FW
  drm/nouveau/secboot: split out FW version-specific LS function pointers
  drm/nouveau/secboot: pass max supported FW version to LS load funcs
  drm/nouveau/core: support versioned firmware loading
  drm/nouveau/core: pass subdev into nvkm_firmware_get, rather than device
  block: free sched's request pool in blk_cleanup_queue
  bpf: expand section tests for test_section_names
  bpf: more msg_name rewrite tests to test_sock_addr
  bpf, bpftool: enable recvmsg attach types
  bpf, libbpf: enable recvmsg attach types
  bpf: sync tooling uapi header
  bpf: fix unconnected udp hooks
  vfio/mdev: Synchronize device create/remove with parent removal
  vfio/mdev: Avoid creating sysfs remove file on stale device removal
  pktgen: do not sleep with the thread lock held.
  net: mvpp2: Use strscpy to handle stat strings
  net: rds: fix memory leak in rds_ib_flush_mr_pool
  ipv6: fix EFAULT on sendto with icmpv6 and hdrincl
  ipv6: use READ_ONCE() for inet->hdrincl as in ipv4
  soundwire: intel: set dai min and max channels correctly
  soundwire: stream: fix bad unlock balance
  x86/fpu: Use fault_in_pages_writeable() for pre-faulting
  nvme-rdma: use dynamic dma mapping per command
  nvme: Fix u32 overflow in the number of namespace list calculation
  vfio/mdev: Improve the create/remove sequence
  SoC: rt274: Fix internal jack assignment in set_jack callback
  ALSA: hdac: fix memory release for SST and SOF drivers
  ASoC: SOF: Intel: hda: use the defined ppcap functions
  ASoC: core: move DAI pre-links initiation to snd_soc_instantiate_card
  ASoC: Intel: cht_bsw_rt5672: fix kernel oops with platform_name override
  ASoC: Intel: cht_bsw_nau8824: fix kernel oops with platform_name override
  ASoC: Intel: bytcht_es8316: fix kernel oops with platform_name override
  ASoC: Intel: cht_bsw_max98090: fix kernel oops with platform_name override
  Revert "gfs2: Replace gl_revokes with a GLF flag"
  arm64: Silence gcc warnings about arch ABI drift
  parisc: Fix crash due alternative coding for NP iopdir_fdc bit
  parisc: Use lpa instruction to load physical addresses in driver code
  parisc: configs: Remove useless UEVENT_HELPER_PATH
  parisc: Use implicit space register selection for loading the coherence index of I/O pdirs
  usb: gadget: udc: lpc32xx: fix return value check in lpc32xx_udc_probe()
  usb: gadget: dwc2: fix zlp handling
  usb: dwc2: Set actual frame number for completed ISOC transfer for none DDMA
  usb: gadget: udc: lpc32xx: allocate descriptor with GFP_ATOMIC
  usb: gadget: fusb300_udc: Fix memory leak of fusb300->ep[i]
  usb: phy: mxs: Disable external charger detect in mxs_phy_hw_init()
  usb: dwc2: Fix DMA cache alignment issues
  usb: dwc2: host: Fix wMaxPacketSize handling (fix webcam regression)
  ARM64: trivial: s/TIF_SECOMP/TIF_SECCOMP/ comment typo fix
  drm/komeda: Potential error pointer dereference
  drm/komeda: remove set but not used variable 'kcrtc'
  x86/CPU: Add more Icelake model numbers
  hwmon: (pmbus/core) Treat parameters as paged if on multiple pages
  hwmon: (pmbus/core) mutex_lock write in pmbus_set_samples
  hwmon: (core) add thermal sensors only if dev->of_node is present
  Revert "fib_rules: return 0 directly if an exactly same rule exists when NLM_F_EXCL not supplied"
  net: aquantia: fix wol configuration not applied sometimes
  ethtool: fix potential userspace buffer overflow
  Fix memory leak in sctp_process_init
  net: rds: fix memory leak when unload rds_rdma
  ipv6: fix the check before getting the cookie in rt6_get_cookie
  ipv4: not do cache for local delivery if bc_forwarding is enabled
  selftests: vm: Fix test build failure when built by itself
  tools: bpftool: Fix JSON output when lookup fails
  mmc: also set max_segment_size in the device
  mtip32xx: also set max_segment_size in the device
  rsxx: don't call dma_set_max_seg_size
  nvme-pci: don't limit DMA segement size
  s390/qeth: handle error when updating TX queue count
  s390/qeth: fix VLAN attribute in bridge_hostnotify udev event
  s390/qeth: check dst entry before use
  s390/qeth: handle limited IPv4 broadcast in L3 TX path
  ceph: fix error handling in ceph_get_caps()
  ceph: avoid iput_final() while holding mutex or in dispatch thread
  ceph: single workqueue for inode related works
  cgroup: css_task_iter_skip()'d iterators must be advanced before accessed
  drm/amd/amdgpu: add RLC firmware to support raven1 refresh
  drm/amd/powerplay: add set_power_profile_mode for raven1_refresh
  drm/amdgpu: fix ring test failure issue during s3 in vce 3.0 (V2)
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 450
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 449
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 448
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 446
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 445
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 444
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 443
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 442
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 441
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 440
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 438
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 437
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 436
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 435
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 434
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 433
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 432
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 431
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 430
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 429
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 428
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 426
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 424
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 423
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 422
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 421
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 420
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 419
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 418
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 417
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 416
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 414
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 412
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 411
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 410
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 409
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 408
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 407
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 406
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 405
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 404
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 403
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 402
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 401
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 400
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 399
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 398
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 397
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 396
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 395
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 394
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 393
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 392
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 391
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 390
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 389
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 388
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 387
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 380
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 378
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 377
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 376
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 375
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 373
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 372
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 371
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 370
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 367
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 365
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 364
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 363
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 362
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 354
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 353
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 352
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 351
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 350
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 349
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 348
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 347
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 346
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 345
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 344
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 343
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 342
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 341
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 340
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 339
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 338
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 336
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 335
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 334
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 333
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 332
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 330
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 328
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 326
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 325
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 324
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 323
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 322
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 321
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 320
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 316
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 315
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 314
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 313
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 312
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 311
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 310
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 309
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 308
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 307
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 305
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 301
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 300
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 299
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 297
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 296
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 295
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 294
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 292
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 291
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 290
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 289
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 288
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 287
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 286
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 285
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 284
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 283
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 282
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 281
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 280
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 278
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 277
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 276
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 275
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 274
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 273
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 272
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 271
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 270
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 269
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 268
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 267
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 266
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 265
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 264
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 263
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 262
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 260
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 258
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 257
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 256
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 254
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 253
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 252
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 251
  lib/test_stackinit: Handle Clang auto-initialization pattern
  block: Drop unlikely before IS_ERR(_OR_NULL)
  xen/swiotlb: don't initialize swiotlb twice on arm64
  s390/mm: fix address space detection in exception handling
  HID: logitech-dj: Fix 064d:c52f receiver support
  Revert "HID: core: Call request_module before doing device_add"
  Revert "HID: core: Do not call request_module() in async context"
  Revert "HID: Increase maximum report size allowed by hid_field_extract()"
  tests: fix pidfd-test compilation
  signal: improve comments
  samples: fix pidfd-metadata compilation
  arm64: arch_timer: mark functions as __always_inline
  arm64: smp: Moved cpu_logical_map[] to smp.h
  arm64: cpufeature: Fix missing ZFR0 in __read_sysreg_by_encoding()
  selftests/bpf: move test_lirc_mode2_user to TEST_GEN_PROGS_EXTENDED
  USB: Fix chipmunk-like voice when using Logitech C270 for recording audio.
  USB: usb-storage: Add new ID to ums-realtek
  udmabuf: actually unmap the scatterlist
  net: fix indirect calls helpers for ptype list hooks.
  net: ipvlan: Fix ipvlan device tso disabled while NETIF_F_IP_CSUM is set
  scsi: smartpqi: unlock on error in pqi_submit_raid_request_synchronous()
  scsi: ufs: Check that space was properly alloced in copy_query_response
  udp: only choose unbound UDP socket for multicast when not in a VRF
  net/tls: replace the sleeping lock around RX resync with a bit lock
  Revert "net/tls: avoid NULL-deref on resync during device removal"
  block: aoe: no need to check return value of debugfs_create functions
  net: dsa: sja1105: Fix link speed not working at 100 Mbps and below
  net: phylink: avoid reducing support mask
  scripts/checkstack.pl: Fix arm64 wrong or unknown architecture
  kbuild: tar-pkg: enable communication with jobserver
  kconfig: tests: fix recursive inclusion unit test
  kbuild: teach kselftest-merge to find nested config files
  nvmet: fix data_len to 0 for bdev-backed write_zeroes
  MAINTAINERS: Hand over skd maintainership
  ASoC: sun4i-i2s: Add offset to RX channel select
  ASoC: sun4i-i2s: Fix sun8i tx channel offset mask
  ASoC: max98090: remove 24-bit format support if RJ is 0
  ASoC: da7219: Fix build error without CONFIG_I2C
  ASoC: SOF: Intel: hda: Fix COMPILE_TEST build error
  drm/arm/hdlcd: Allow a bit of clock tolerance
  drm/arm/hdlcd: Actually validate CRTC modes
  drm/arm/mali-dp: Add a loop around the second set CVAL and try 5 times
  drm/komeda: fixing of DMA mapping sg segment warning
  netfilter: ipv6: nf_defrag: fix leakage of unqueued fragments
  habanalabs: Read upper bits of trace buffer from RWPHI
  arm64: arch_k3: Fix kconfig dependency warning
  drm: don't block fb changes for async plane updates
  drm/vc4: fix fb references in async update
  drm/msm: fix fb references in async update
  drm/amd: fix fb references in async update
  drm/rockchip: fix fb references in async update
  xen-blkfront: switch kcalloc to kvcalloc for large array allocation
  drm/mediatek: call mtk_dsi_stop() after mtk_drm_crtc_atomic_disable()
  drm/mediatek: clear num_pipes when unbind driver
  drm/mediatek: call drm_atomic_helper_shutdown() when unbinding driver
  drm/mediatek: unbind components in mtk_drm_unbind()
  drm/mediatek: fix unbind functions
  net: sfp: read eeprom in maximum 16 byte increments
  selftests: set sysctl bc_forwarding properly in router_broadcast.sh
  ANDROID: update gki aarch64 ABI representation
  net: ethernet: mediatek: Use NET_IP_ALIGN to judge if HW RX_2BYTE_OFFSET is enabled
  net: ethernet: mediatek: Use hw_feature to judge if HWLRO is supported
  net: ethernet: ti: cpsw_ethtool: fix ethtool ring param set
  ANDROID: gki_defconfig: Enable CMA, SLAB_FREELIST (RANDOM and HARDENED) on x86
  bpf: udp: Avoid calling reuseport's bpf_prog from udp_gro
  bpf: udp: ipv6: Avoid running reuseport's bpf_prog from __udp6_lib_err
  rcu: locking and unlocking need to always be at least barriers
  ANDROID: gki_defconfig: enable SLAB_FREELIST_RANDOM, SLAB_FREELIST_HARDENED
  ANDROID: gki_defconfig: enable CMA and increase CMA_AREAS
  ASoC: SOF: fix DSP oops definitions in FW ABI
  ASoC: hda: fix unbalanced codec dev refcount for HDA_DEV_ASOC
  ASoC: SOF: ipc: replace fw ready bitfield with explicit bit ordering
  ASoC: SOF: bump to ABI 3.6
  ASoC: SOF: soundwire: add initial soundwire support
  ASoC: SOF: uapi: mirror firmware changes
  ASoC: Intel: Baytrail: add quirk for Aegex 10 (RU2) tablet
  xfs: inode btree scrubber should calculate im_boffset correctly
  mmc: sdhci_am654: Fix SLOTTYPE write
  usb: typec: ucsi: ccg: fix memory leak in do_flash
  ANDROID: update gki aarch64 ABI representation
  habanalabs: Fix virtual address access via debugfs for 2MB pages
  drm/komeda: Constify the usage of komeda_component/pipeline/dev_funcs
  x86/power: Fix 'nosmt' vs hibernation triple fault during resume
  mm/vmalloc: Avoid rare case of flushing TLB with weird arguments
  mm/vmalloc: Fix calculation of direct map addr range
  PM: sleep: Add kerneldoc comments to some functions
  drm/i915/gvt: save RING_HEAD into vreg when vgpu switched out
  sparc: perf: fix updated event period in response to PERF_EVENT_IOC_PERIOD
  mdesc: fix a missing-check bug in get_vdev_port_node_info()
  drm/i915/gvt: add F_CMD_ACCESS flag for wa regs
  sparc64: Fix regression in non-hypervisor TLB flush xcall
  packet: unconditionally free po->rollover
  Update my email address
  net: hns: Fix loopback test failed at copper ports
  Linux 5.2-rc3
  net: dsa: mv88e6xxx: avoid error message on remove from VLAN 0
  mm, compaction: make sure we isolate a valid PFN
  include/linux/generic-radix-tree.h: fix kerneldoc comment
  kernel/signal.c: trace_signal_deliver when signal_group_exit
  drivers/iommu/intel-iommu.c: fix variable 'iommu' set but not used
  spdxcheck.py: fix directory structures
  kasan: initialize tag to 0xff in __kasan_kmalloc
  z3fold: fix sheduling while atomic
  scripts/gdb: fix invocation when CONFIG_COMMON_CLK is not set
  mm/gup: continue VM_FAULT_RETRY processing even for pre-faults
  ocfs2: fix error path kobject memory leak
  memcg: make it work on sparse non-0-node systems
  mm, memcg: consider subtrees in memory.events
  prctl_set_mm: downgrade mmap_sem to read lock
  prctl_set_mm: refactor checks from validate_prctl_map
  kernel/fork.c: make max_threads symbol static
  arch/arm/boot/compressed/decompress.c: fix build error due to lz4 changes
  arch/parisc/configs/c8000_defconfig: remove obsoleted CONFIG_DEBUG_SLAB_LEAK
  mm/vmalloc.c: fix typo in comment
  lib/sort.c: fix kernel-doc notation warnings
  mm: fix Documentation/vm/hmm.rst Sphinx warnings
  treewide: fix typos of SPDX-License-Identifier
  crypto: ux500 - fix license comment syntax error
  MAINTAINERS: add I2C DT bindings to ARM platforms
  MAINTAINERS: add DT bindings to i2c drivers
  mwifiex: Fix heap overflow in mwifiex_uap_parse_tail_ies()
  iwlwifi: mvm: change TLC config cmd sent by rs to be async
  iwlwifi: Fix double-free problems in iwl_req_fw_callback()
  iwlwifi: fix AX201 killer sku loading firmware issue
  iwlwifi: print fseq info upon fw assert
  iwlwifi: clear persistence bit according to device family
  iwlwifi: fix load in rfkill flow for unified firmware
  iwlwifi: mvm: remove d3_sram debugfs file
  bpf, riscv: clear high 32 bits for ALU32 add/sub/neg/lsh/rsh/arsh
  libbpf: Return btf_fd for load_sk_storage_btf
  HID: a4tech: fix horizontal scrolling
  HID: hyperv: Add a module description line
  net: dsa: sja1105: Don't store frame type in skb->cb
  block: print offending values when cloned rq limits are exceeded
  blk-mq: Document the blk_mq_hw_queue_to_node() arguments
  blk-mq: Fix spelling in a source code comment
  block: Fix bsg_setup_queue() kernel-doc header
  block: Fix rq_qos_wait() kernel-doc header
  block: Fix blk_mq_*_map_queues() kernel-doc headers
  block: Fix throtl_pending_timer_fn() kernel-doc header
  block: Convert blk_invalidate_devt() header into a non-kernel-doc header
  block/partitions/ldm: Convert a kernel-doc header into a non-kernel-doc header
  leds: avoid flush_work in atomic context
  cgroup: Include dying leaders with live threads in PROCS iterations
  cgroup: Implement css_task_iter_skip()
  cgroup: Call cgroup_release() before __exit_signal()
  netfilter: nf_tables: fix module autoload with inet family
  Revert "lockd: Show pid of lockd for remote locks"
  ALSA: hda/realtek - Update headset mode for ALC256
  fs/adfs: fix filename fixup handling for "/" and "//" names
  fs/adfs: move append_filetype_suffix() into adfs_object_fixup()
  fs/adfs: remove truncated filename hashing
  fs/adfs: factor out filename fixup
  fs/adfs: factor out object fixups
  fs/adfs: factor out filename case lowering
  fs/adfs: factor out filename comparison
  ovl: doc: add non-standard corner cases
  pstore/ram: Run without kernel crash dump region
  MAINTAINERS: add Vasily Gorbik and Christian Borntraeger for s390
  MAINTAINERS: Farewell Martin Schwidefsky
  pstore: Set tfm to NULL on free_buf_for_compression
  nds32: add new emulations for floating point instruction
  nds32: Avoid IEX status being incorrectly modified
  math-emu: Use statement expressions to fix Wshift-count-overflow warning
  net: correct zerocopy refcnt with udp MSG_MORE
  ethtool: Check for vlan etype or vlan tci when parsing flow_rule
  net: don't clear sock->sk early to avoid trouble in strparser
  net-gro: fix use-after-free read in napi_gro_frags()
  net: dsa: tag_8021q: Create a stable binary format
  net: dsa: tag_8021q: Change order of rx_vid setup
  net: mvpp2: fix bad MVPP2_TXQ_SCHED_TOKEN_CNTR_REG queue value
  docs cgroups: add another example size for hugetlb
  NFSv4.1: Fix bug only first CB_NOTIFY_LOCK is handled
  NFSv4.1: Again fix a race where CB_NOTIFY_LOCK fails to wake a waiter
  ipv4: tcp_input: fix stack out of bounds when parsing TCP options.
  mlxsw: spectrum: Prevent force of 56G
  mlxsw: spectrum_acl: Avoid warning after identical rules insertion
  SUNRPC: Fix a use after free when a server rejects the RPCSEC_GSS credential
  net: dsa: mv88e6xxx: fix handling of upper half of STATS_TYPE_PORT
  SUNRPC fix regression in umount of a secure mount
  r8169: fix MAC address being lost in PCI D3
  treewide: Add SPDX license identifier - Kbuild
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 225
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 224
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 223
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 222
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 221
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 220
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 218
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 217
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 216
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 215
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 214
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 213
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 211
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 210
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 209
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 207
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 206
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 203
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 201
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 200
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 199
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 198
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 197
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 195
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 194
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 193
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 191
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 190
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 188
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 185
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 183
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 182
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 180
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 179
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 178
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 177
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 176
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 175
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 174
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 173
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 172
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 171
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 170
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 167
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 166
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 165
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 164
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 162
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 161
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 160
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 159
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 158
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 157
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 156
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 155
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 154
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 153
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 152
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 151
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 150
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 149
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 148
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 147
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 145
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 144
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 143
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 142
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 140
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 139
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 138
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 137
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 136
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 135
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 133
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 132
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 131
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 130
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 129
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 128
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 127
  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 126
  net: core: support XDP generic on stacked devices.
  netvsc: unshare skb in VF rx handler
  udp: Avoid post-GRO UDP checksum recalculation
  nvme-tcp: fix queue mapping when queue count is limited
  nvme-rdma: fix queue mapping when queue count is limited
  fpga: zynqmp-fpga: Correctly handle error pointer
  selftests: vm: install test_vmalloc.sh for run_vmtests
  userfaultfd: selftest: fix compiler warning
  kselftest/cgroup: fix incorrect test_core skip
  kselftest/cgroup: fix unexpected testing failure on test_core
  kselftest/cgroup: fix unexpected testing failure on test_memcontrol
  xtensa: Fix section mismatch between memblock_reserve and mem_reserve
  signal/ptrace: Don't leak unitialized kernel memory with PTRACE_PEEK_SIGINFO
  mwifiex: Abort at too short BSS descriptor element
  mwifiex: Fix possible buffer overflows at parsing bss descriptor
  drm/i915/gvt: Assign NULL to the pointer after memory free.
  drm/i915/gvt: Check if cur_pt_type is valid
  x86: intel_epb: Do not build when CONFIG_PM is unset
  crypto: hmac - fix memory leak in hmac_init_tfm()
  crypto: jitterentropy - change back to module_init()
  ARM: dts: Drop bogus CLKSEL for timer12 on dra7
  KVM: PPC: Book3S HV: Restore SPRG3 in kvmhv_p9_guest_entry()
  KVM: PPC: Book3S HV: Fix lockdep warning when entering guest on POWER9
  KVM: PPC: Book3S HV: XIVE: Fix page offset when clearing ESB pages
  KVM: PPC: Book3S HV: XIVE: Take the srcu read lock when accessing memslots
  KVM: PPC: Book3S HV: XIVE: Do not clear IRQ data of passthrough interrupts
  KVM: PPC: Book3S HV: XIVE: Introduce a new mutex for the XIVE device
  drm/i915/gvt: Fix cmd length of VEB_DI_IECP
  drm/i915/gvt: refine ggtt range validation
  drm/i915/gvt: Fix vGPU CSFE_CHICKEN1_REG mmio handler
  drm/i915/gvt: Fix GFX_MODE handling
  drm/i915/gvt: Update force-to-nonpriv register whitelist
  drm/i915/gvt: Initialize intel_gvt_gtt_entry in stack
  ima: show rules with IMA_INMASK correctly
  evm: check hash algorithm passed to init_desc()
  scsi: libsas: delete sas port if expander discover failed
  scsi: libsas: only clear phy->in_shutdown after shutdown event done
  scsi: scsi_dh_alua: Fix possible null-ptr-deref
  scsi: smartpqi: properly set both the DMA mask and the coherent DMA mask
  scsi: zfcp: fix to prevent port_remove with pure auto scan LUNs (only sdevs)
  scsi: zfcp: fix missing zfcp_port reference put on -EBUSY from port_remove
  scsi: libcxgbi: add a check for NULL pointer in cxgbi_check_route()
  net: phy: dp83867: Set up RGMII TX delay
  net: phy: dp83867: do not call config_init twice
  net: phy: dp83867: increase SGMII autoneg timer duration
  net: phy: dp83867: fix speed 10 in sgmii mode
  net: phy: marvell10g: report if the PHY fails to boot firmware
  net: phylink: ensure consistent phy interface mode
  cgroup: Use css_tryget() instead of css_tryget_online() in task_get_css()
  blk-mq: Fix memory leak in error handling
  usbip: usbip_host: fix stub_dev lock context imbalance regression
  net: sh_eth: fix mdio access in sh_eth_close() for R-Car Gen2 and RZ/A1 SoCs
  MIPS: uprobes: remove set but not used variable 'epc'
  s390/crypto: fix possible sleep during spinlock aquired
  MIPS: pistachio: Build uImage.gz by default
  MIPS: Make virt_addr_valid() return bool
  MIPS: Bounds check virt_addr_valid
  CIFS: cifs_read_allocate_pages: don't iterate through whole page array on ENOMEM
  RDMA/efa: Remove MAYEXEC flag check from mmap flow
  mlx5: avoid 64-bit division
  IB/hfi1: Validate page aligned for a given virtual address
  IB/{qib, hfi1, rdmavt}: Correct ibv_devinfo max_mr value
  IB/hfi1: Insure freeze_work work_struct is canceled on shutdown
  IB/rdmavt: Fix alloc_qpn() WARN_ON()
  ASoC: sun4i-codec: fix first delay on Speaker
  drm/amdgpu: reserve stollen vram for raven series
  media: venus: hfi_parser: fix a regression in parser
  selftests: bpf: fix compiler warning in flow_dissector test
  arm64: use the correct function type for __arm64_sys_ni_syscall
  arm64: use the correct function type in SYSCALL_DEFINE0
  arm64: fix syscall_fn_t type
  block: don't protect generic_make_request_checks with blk_queue_enter
  block: move blk_exit_queue into __blk_release_queue
  selftests: bpf: complete sub-register zero extension checks
  selftests: bpf: move sub-register zero extension checks into subreg.c
  ovl: detect overlapping layers
  drm/i915/icl: Add WaDisableBankHangMode
  ALSA: fireface: Use ULL suffixes for 64-bit constants
  signal/arm64: Use force_sig not force_sig_fault for SIGKILL
  nl80211: fill all policy .type entries
  mac80211: free peer keys before vif down in mesh
  ANDROID: ABI out: Use the extension .xml rather then .out
  drm/mediatek: respect page offset for PRIME mmap calls
  drm/mediatek: adjust ddp clock control flow
  ALSA: hda/realtek - Improve the headset mic for Acer Aspire laptops
  KVM: PPC: Book3S HV: XIVE: Fix the enforced limit on the vCPU identifier
  KVM: PPC: Book3S HV: XIVE: Do not test the EQ flag validity when resetting
  KVM: PPC: Book3S HV: XIVE: Clear file mapping when device is released
  KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
  KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list
  KVM: PPC: Book3S HV: Use new mutex to synchronize MMU setup
  KVM: PPC: Book3S HV: Avoid touching arch.mmu_ready in XIVE release functions
  Revert "drivers: thermal: tsens: Add new operation to check if a sensor is enabled"
  net/mlx5e: Disable rxhash when CQE compress is enabled
  net/mlx5e: restrict the real_dev of vlan device is the same as uplink device
  net/mlx5: Allocate root ns memory using kzalloc to match kfree
  net/mlx5: Avoid double free in fs init error unwinding path
  net/mlx5: Avoid double free of root ns in the error flow path
  net/mlx5: Fix error handling in mlx5_load()
  Documentation: net-sysfs: Remove duplicate PHY device documentation
  llc: fix skb leak in llc_build_and_send_ui_pkt()
  selftests: pmtu: Fix encapsulating device in pmtu_vti6_link_change_mtu
  dfs_cache: fix a wrong use of kfree in flush_cache_ent()
  fs/cifs/smb2pdu.c: fix buffer free in SMB2_ioctl_free
  cifs: fix memory leak of pneg_inbuf on -EOPNOTSUPP ioctl case
  xenbus: Avoid deadlock during suspend due to open transactions
  xen/pvcalls: Remove set but not used variable
  tracing: Avoid memory leak in predicate_parse()
  habanalabs: fix bug in checking huge page optimization
  mmc: sdhci: Fix SDIO IRQ thread deadlock
  dpaa_eth: use only online CPU portals
  net: mvneta: Fix err code path of probe
  net: stmmac: Do not output error on deferred probe
  Btrfs: fix race updating log root item during fsync
  Btrfs: fix wrong ctime and mtime of a directory after log replay
  ARC: [plat-hsdk] Get rid of inappropriate PHY settings
  ARC: [plat-hsdk]: Add support of Vivante GPU
  ARC: [plat-hsdk]: enable creg-gpio controller
  Btrfs: fix fsync not persisting changed attributes of a directory
  btrfs: qgroup: Check bg while resuming relocation to avoid NULL pointer dereference
  btrfs: reloc: Also queue orphan reloc tree for cleanup to avoid BUG_ON()
  Btrfs: incremental send, fix emission of invalid clone operations
  Btrfs: incremental send, fix file corruption when no-holes feature is enabled
  btrfs: correct zstd workspace manager lock to use spin_lock_bh()
  btrfs: Ensure replaced device doesn't have pending chunk allocation
  ia64: fix build errors by exporting paddr_to_nid()
  ASoC: SOF: Intel: hda: fix the hda init chip
  ASoC: SOF: ipc: fix a race, leading to IPC timeouts
  ASoC: SOF: control: correct the copy size for bytes kcontrol put
  ASoC: SOF: pcm: remove warning - initialize workqueue on open
  ASoC: SOF: pcm: clear hw_params_upon_resume flag correctly
  ASoC: SOF: core: fix error handling with the probe workqueue
  ASoC: SOF: core: remove snd_soc_unregister_component in case of error
  ASoC: SOF: core: remove DSP after unregistering machine driver
  ASoC: soc-core: fixup references at soc_cleanup_card_resources()
  arm64/module: revert to unsigned interpretation of ABS16/32 relocations
  KVM: s390: Do not report unusabled IDs via KVM_CAP_MAX_VCPU_ID
  kvm: fix compile on s390 part 2
  xprtrdma: Use struct_size() in kzalloc()
  tools headers UAPI: Sync kvm.h headers with the kernel sources
  perf record: Fix s390 missing module symbol and warning for non-root users
  perf machine: Read also the end of the kernel
  perf test vmlinux-kallsyms: Ignore aliases to _etext when searching on kallsyms
  perf session: Add missing swap ops for namespace events
  perf namespace: Protect reading thread's namespace
  tools headers UAPI: Sync drm/drm.h with the kernel
  s390/crypto: fix gcm-aes-s390 selftest failures
  s390/zcrypt: Fix wrong dispatching for control domain CPRBs
  s390/pci: fix assignment of bus resources
  s390/pci: fix struct definition for set PCI function
  s390: mark __cpacf_check_opcode() and cpacf_query_func() as __always_inline
  s390: add unreachable() to dump_fault_info() to fix -Wmaybe-uninitialized
  tools headers UAPI: Sync drm/i915_drm.h with the kernel
  tools headers UAPI: Sync linux/fs.h with the kernel
  tools headers UAPI: Sync linux/sched.h with the kernel
  tools arch x86: Sync asm/cpufeatures.h with the with the kernel
  tools include UAPI: Update copy of files related to new fspick, fsmount, fsconfig, fsopen, move_mount and open_tree syscalls
  perf arm64: Fix mksyscalltbl when system kernel headers are ahead of the kernel
  perf data: Fix 'strncat may truncate' build failure with recent gcc
  arm64: Fix the arm64_personality() syscall wrapper redirection
  rtw88: Make some symbols static
  rtw88: avoid circular locking between local->iflist_mtx and rtwdev->mutex
  rsi: Properly initialize data in rsi_sdio_ta_reset
  rtw88: fix unassigned rssi_level in rtw_sta_info
  rtw88: fix subscript above array bounds compiler warning
  fuse: extract helper for range writeback
  fuse: fix copy_file_range() in the writeback case
  mmc: meson-gx: fix irq ack
  mmc: tmio: fix SCC error handling to avoid false positive CRC error
  mmc: tegra: Fix a warning message
  memstick: mspro_block: Fix an error code in mspro_block_issue_req()
  mac80211: mesh: fix RCU warning
  nl80211: fix station_info pertid memory leak
  mac80211: Do not use stack memory with scatterlist for GMAC
  ALSA: line6: Assure canceling delayed work at disconnection
  configfs: Fix use-after-free when accessing sd->s_dentry
  ALSA: hda - Force polling mode on CNL for fixing codec communication
  i2c: synquacer: fix synquacer_i2c_doxfer() return value
  i2c: mlxcpld: Fix wrong initialization order in probe
  i2c: dev: fix potential memory leak in i2cdev_ioctl_rdwr
  RDMA/core: Fix panic when port_data isn't initialized
  RDMA/uverbs: Pass udata on uverbs error unwind
  RDMA/core: Clear out the udata before error unwind
  net: aquantia: tcp checksum 0xffff being handled incorrectly
  net: aquantia: fix LRO with FCS error
  net: aquantia: check rx csum for all packets in LRO session
  net: aquantia: tx clean budget logic error
  vhost: scsi: add weight support
  vhost: vsock: add weight support
  vhost_net: fix possible infinite loop
  vhost: introduce vhost_exceeds_weight()
  virtio: Fix indentation of VIRTIO_MMIO
  virtio: add unlikely() to WARN_ON_ONCE()
  iommu/vt-d: Set the right field for Page Walk Snoop
  iommu/vt-d: Fix lock inversion between iommu->lock and device_domain_lock
  iommu: Add missing new line for dma type
  drm/etnaviv: lock MMU while dumping core
  block: Don't revalidate bdev of hidden gendisk
  loop: Don't change loop device under exclusive opener
  drm/imx: ipuv3-plane: fix atomic update status query for non-plus i.MX6Q
  drm/qxl: drop WARN_ONCE()
  iio: temperature: mlx90632 Relax the compatibility check
  iio: imu: st_lsm6dsx: fix PM support for st_lsm6dsx i2c controller
  staging:iio:ad7150: fix threshold mode config bit
  fuse: add FUSE_WRITE_KILL_PRIV
  fuse: fallocate: fix return with locked inode
  PCI: PM: Avoid possible suspend-to-idle issue
  ACPI: PM: Call pm_set_suspend_via_firmware() during hibernation
  ACPI/PCI: PM: Add missing wakeup.flags.valid checks
  ovl: support the FS_IOC_FS[SG]ETXATTR ioctls
  soundwire: stream: fix out of boundary access on port properties
  net: tulip: de4x5: Drop redundant MODULE_DEVICE_TABLE()
  selftests/tls: add test for sleeping even though there is data
  net/tls: fix no wakeup on partial reads
  selftests/tls: test for lowat overshoot with multiple records
  net/tls: fix lowat calculation if some data came from previous record
  dpaa2-eth: Make constant 64-bit long
  dpaa2-eth: Use PTR_ERR_OR_ZERO where appropriate
  dpaa2-eth: Fix potential spectre issue
  bonding/802.3ad: fix slave link initialization transition states
  io_uring: Fix __io_uring_register() false success
  net: ethtool: Document get_rxfh_context and set_rxfh_context ethtool ops
  net: stmmac: dwmac-mediatek: modify csr_clk value to fix mdio read/write fail
  net: stmmac: fix csr_clk can't be zero issue
  net: stmmac: update rx tail pointer register to fix rx dma hang issue.
  ip_sockglue: Fix missing-check bug in ip_ra_control()
  ipv6_sockglue: Fix a missing-check bug in ip6_ra_control()
  efi: Allow the number of EFI configuration tables entries to be zero
  efi/x86/Add missing error handling to old_memmap 1:1 mapping code
  parisc: Fix compiler warnings in float emulation code
  parisc/slab: cleanup after /proc/slab_allocators removal
  bpf: sockmap, fix use after free from sleep in psock backlog workqueue
  net: sched: don't use tc_action->order during action dump
  cxgb4: Revert "cxgb4: Remove SGE_HOST_PAGE_SIZE dependency on page size"
  net: fec: fix the clk mismatch in failed_reset path
  habanalabs: Avoid using a non-initialized MMU cache mutex
  habanalabs: fix debugfs code
  uapi/habanalabs: add opcode for enable/disable device debug mode
  habanalabs: halt debug engines on user process close
  selftests: rtc: rtctest: specify timeouts
  selftests/harness: Allow test to configure timeout
  selftests/ftrace: Add checkbashisms meta-testcase
  selftests/ftrace: Make a script checkbashisms clean
  media: smsusb: better handle optional alignment
  test_firmware: Use correct snprintf() limit
  genwqe: Prevent an integer overflow in the ioctl
  parport: Fix mem leak in parport_register_dev_model
  fpga: dfl: expand minor range when registering chrdev region
  fpga: dfl: Add lockdep classes for pdata->lock
  fpga: dfl: afu: Pass the correct device to dma_mapping_error()
  fpga: stratix10-soc: fix use-after-free on s10_init()
  w1: ds2408: Fix typo after 49695ac468 (reset on output_write retry with readback)
  kheaders: Do not regenerate archive if config is not changed
  kheaders: Move from proc to sysfs
  drm/amd/display: Don't load DMCU for Raven 1 (v2)
  drm/i915: Maintain consistent documentation subsection ordering
  scripts/sphinx-pre-install: make it handle Sphinx versions
  docs: Fix conf.py for Sphinx 2.0
  vt/fbcon: deinitialize resources in visual_init() after failed memory allocation
  xfs: fix broken log reservation debugging
  clocksource/drivers/timer-ti-dm: Change to new style declaration
  ASoC: core: lock client_mutex while removing link components
  ASoC: simple-card: Restore original configuration of DAI format
  {nl,mac}80211: allow 4addr AP operation on crypto controlled devices
  mac80211_hwsim: mark expected switch fall-through
  mac80211: fix rate reporting inside cfg80211_calculate_bitrate_he()
  mac80211: remove set but not used variable 'old'
  mac80211: handle deauthentication/disassociation from TDLS peer
  gpio: fix gpio-adp5588 build errors
  pinctrl: stmfx: Fix compile issue when CONFIG_OF_GPIO is not defined
  staging: kpc2000: Add dependency on MFD_CORE to kconfig symbol 'KPC2000'
  perf/ring-buffer: Use regular variables for nesting
  perf/ring-buffer: Always use {READ,WRITE}_ONCE() for rb->user_page data
  perf/ring_buffer: Add ordering to rb->nest increment
  perf/ring_buffer: Fix exposing a temporarily decreased data_head
  x86/CPU/AMD: Don't force the CPB cap when running under a hypervisor
  x86/boot: Provide KASAN compatible aliases for string routines
  ALSA: hda/realtek - Enable micmute LED for Huawei laptops
  Input: uinput - add compat ioctl number translation for UI_*_FF_UPLOAD
  Input: silead - add MSSL0017 to acpi_device_id
  cxgb4: offload VLAN flows regardless of VLAN ethtype
  hsr: fix don't prune the master node from the node_db
  net: mvpp2: cls: Fix leaked ethtool_rx_flow_rule
  docs: fix multiple doc build warnings in enumeration.rst
  lib/list_sort: fix kerneldoc build error
  docs: fix numaperf.rst and add it to the doc tree
  doc: Cope with the deprecation of AutoReporter
  doc: Cope with Sphinx logging deprecations
  bpf: sockmap, restore sk_write_space when psock gets dropped
  selftests: bpf: add zero extend checks for ALU32 and/or/xor
  bpf, riscv: clear target register high 32-bits for and/or/xor on ALU32
  spi: abort spi_sync if failed to prepare_transfer_hardware
  ALSA: hda/realtek - Set default power save node to 0
  ipv4/igmp: fix build error if !CONFIG_IP_MULTICAST
  powerpc/kexec: Fix loading of kernel + initramfs with kexec_file_load()
  MIPS: TXx9: Fix boot crash in free_initmem()
  MIPS: remove a space after -I to cope with header search paths for VDSO
  MIPS: mark ginvt() as __always_inline
  ipv4/igmp: fix another memory leak in igmpv3_del_delrec()
  bnxt_en: Device serial number is supported only for PFs.
  bnxt_en: Reduce memory usage when running in kdump kernel.
  bnxt_en: Fix possible BUG() condition when calling pci_disable_msix().
  bnxt_en: Fix aggregation buffer leak under OOM condition.
  ipv6: Fix redirect with VRF
  net: stmmac: fix reset gpio free missing
  mISDN: make sure device name is NUL terminated
  net: macb: save/restore the remaining registers and features
  media: dvb: warning about dvb frequency limits produces too much noise
  net/tls: don't ignore netdev notifications if no TLS features
  net/tls: fix state removal with feature flags off
  net/tls: avoid NULL-deref on resync during device removal
  Documentation: add TLS offload documentation
  Documentation: tls: RSTify the ktls documentation
  Documentation: net: move device drivers docs to a submenu
  mISDN: Fix indenting in dsp_cmx.c
  ocelot: Dont allocate another multicast list, use __dev_mc_sync
  Validate required parameters in inet6_validate_link_af
  xhci: Use %zu for printing size_t type
  xhci: Convert xhci_handshake() to use readl_poll_timeout_atomic()
  xhci: Fix immediate data transfer if buffer is already DMA mapped
  usb: xhci: avoid null pointer deref when bos field is NULL
  usb: xhci: Fix a potential null pointer dereference in xhci_debugfs_create_endpoint()
  xhci: update bounce buffer with correct sg num
  media: usb: siano: Fix false-positive "uninitialized variable" warning
  spi: spi-fsl-spi: call spi_finalize_current_message() at the end
  ALSA: hda/realtek - Check headset type by unplug and resume
  powerpc/perf: Fix MMCRA corruption by bhrb_filter
  powerpc/powernv: Return for invalid IMC domain
  HID: logitech-hidpp: Add support for the S510 remote control
  HID: multitouch: handle faulty Elo touch device
  selftests: netfilter: add flowtable test script
  netfilter: nft_flow_offload: IPCB is only valid for ipv4 family
  netfilter: nft_flow_offload: don't offload when sequence numbers need adjustment
  netfilter: nft_flow_offload: set liberal tracking mode for tcp
  netfilter: nf_flow_table: ignore DF bit setting
  ASoC: Intel: sof-rt5682: fix AMP quirk support
  ASoC: Intel: sof-rt5682: fix for codec button mapping
  clk: ti: clkctrl: Fix clkdm_clk handling
  clk: imx: imx8mm: fix int pll clk gate
  clk: sifive: restrict Kconfig scope for the FU540 PRCI driver
  RDMA/hns: Fix PD memory leak for internal allocation
  netfilter: nat: fix udp checksum corruption
  selftests: netfilter: missing error check when setting up veth interface
  RDMA/srp: Rename SRP sysfs name after IB device rename trigger
  ipvs: Fix use-after-free in ip_vs_in
  ARC: [plat-hsdk]: Add missing FIFO size entry in GMAC node
  ARC: [plat-hsdk]: Add missing multicast filter bins number to GMAC node
  samples, bpf: suppress compiler warning
  samples, bpf: fix to change the buffer size for read()
  bpf: Check sk_fullsock() before returning from bpf_sk_lookup()
  bpf: fix out-of-bounds read in __bpf_skc_lookup
  Documentation/networking: fix af_xdp.rst Sphinx warnings
  netfilter: nft_fib: Fix existence check support
  netfilter: nf_queue: fix reinject verdict handling
  dmaengine: sprd: Add interrupt support for 2-stage transfer
  dmaengine: sprd: Fix the right place to configure 2-stage transfer
  dmaengine: sprd: Fix block length overflow
  dmaengine: sprd: Fix the incorrect start for 2-stage destination channels
  dmaengine: sprd: Add validation of current descriptor in irq handler
  dmaengine: sprd: Fix the possible crash when getting descriptor status
  tty: max310x: Fix external crystal register setup
  serial: sh-sci: disable DMA for uart_console
  serial: imx: remove log spamming error message
  tty: serial: msm_serial: Fix XON/XOFF
  USB: serial: option: add Telit 0x1260 and 0x1261 compositions
  USB: serial: pl2303: add Allied Telesis VT-Kit3
  USB: serial: option: add support for Simcom SIM7500/SIM7600 RNDIS mode
  dmaengine: tegra210-adma: Fix spelling
  dmaengine: tegra210-adma: Fix channel FIFO configuration
  dmaengine: tegra210-adma: Fix crash during probe
  dmaengine: mediatek-cqdma: sleeping in atomic context
  dmaengine: dw-axi-dmac: fix null dereference when pointer first is null
  perf/x86/intel/ds: Fix EVENT vs. UEVENT PEBS constraints
  USB: rio500: update Documentation
  USB: rio500: simplify locking
  USB: rio500: fix memory leak in close after disconnect
  USB: rio500: refuse more than one device at a time
  usbip: usbip_host: fix BUG: sleeping function called from invalid context
  USB: sisusbvga: fix oops in error path of sisusb_probe
  USB: Add LPM quirk for Surface Dock GigE adapter
  media: usb: siano: Fix general protection fault in smsusb
  usb: mtu3: fix up undefined reference to usb_debug_root
  USB: Fix slab-out-of-bounds write in usb_get_bos_descriptor
  Input: elantech - enable middle button support on 2 ThinkPads
  dmaengine: fsl-qdma: Add improvement
  dmaengine: jz4780: Fix transfers being ACKed too soon
  gcc-plugins: Fix build failures under Darwin host
  MAINTAINERS: Update Stefan Wahren email address
  netfilter: nf_tables: fix oops during rule dump
  ARC: mm: SIGSEGV userspace trying to access kernel virtual memory
  ARC: fix build warnings
  ARM: dts: bcm: Add missing device_type = "memory" property
  soc: bcm: brcmstb: biuctrl: Register writes require a barrier
  soc: brcmstb: Fix error path for unsupported CPUs
  ARM: dts: dra71x: Disable usb4_tm target module
  ARM: dts: dra71x: Disable rtc target module
  ARM: dts: dra76x: Disable usb4_tm target module
  ARM: dts: dra76x: Disable rtc target module
  ASoC: simple-card: Fix configuration of DAI format
  ASoC: Intel: soc-acpi: Fix machine selection order
  ASoC: rt5677-spi: Handle over reading when flipping bytes
  ASoC: soc-dpm: fixup DAI active unbalance
  pinctrl: intel: Clear interrupt status in mask/unmask callback
  pinctrl: intel: Use GENMASK() consistently
  parisc: Allow building 64-bit kernel without -mlong-calls compiler option
  parisc: Kconfig: remove ARCH_DISCARD_MEMBLOCK
  staging: wilc1000: Fix some double unlock bugs in wilc_wlan_cleanup()
  staging: vc04_services: prevent integer overflow in create_pagelist()
  Staging: vc04_services: Fix a couple error codes
  staging: wlan-ng: fix adapter initialization failure
  staging: kpc2000: double unlock in error handling in kpc_dma_transfer()
  staging: kpc2000: Fix build error without CONFIG_UIO
  staging: kpc2000: fix build error on xtensa
  staging: erofs: set sb->s_root to NULL when failing from __getname()
  ARM: imx: cpuidle-imx6sx: Restrict the SW2ISO increase to i.MX6SX
  firmware: imx: SCU irq should ONLY be enabled after SCU IPC is ready
  arm64: imx: Fix build error without CONFIG_SOC_BUS
  ima: fix wrong signed policy requirement when not appraising
  x86/ima: Check EFI_RUNTIME_SERVICES before using
  stacktrace: Unbreak stack_trace_save_tsk_reliable()
  HID: wacom: Sync INTUOSP2_BT touch state after each frame if necessary
  HID: wacom: Correct button numbering 2nd-gen Intuos Pro over Bluetooth
  HID: wacom: Send BTN_TOUCH in response to INTUOSP2_BT eraser contact
  HID: wacom: Don't report anything prior to the tool entering range
  HID: wacom: Don't set tool type until we're in range
  ASoC: cs42xx8: Add regcache mask dirty
  regulator: tps6507x: Fix boot regression due to testing wrong init_data pointer
  ASoC: fsl_asrc: Fix the issue about unsupported rate
  spi: bitbang: Fix NULL pointer dereference in spi_unregister_master
  Input: elan_i2c - increment wakeup count if wake source
  wireless: Skip directory when generating certificates
  ASoC: ak4458: rstn_control - return a non-zero on error only
  ASoC: soc-pcm: BE dai needs prepare when pause release after resume
  ASoC: ak4458: add return value for ak4458_probe
  ASoC : cs4265 : readable register too low
  ASoC: SOF: fix error in verbose ipc command parsing
  ASoC: SOF: fix race in FW boot timeout handling
  ASoC: SOF: nocodec: fix undefined reference
  iio: adc: ti-ads8688: fix timestamp is not updated in buffer
  iio: dac: ds4422/ds4424 fix chip verification
  HID: rmi: Use SET_REPORT request on control endpoint for Acer Switch 3 and 5
  HID: logitech-hidpp: add support for the MX5500 keyboard
  HID: logitech-dj: add support for the Logitech MX5500's Bluetooth Mini-Receiver
  HID: i2c-hid: add iBall Aer3 to descriptor override
  spi: Fix Raspberry Pi breakage
  ARM: dts: dra76x: Update MMC2_HS200_MANUAL1 iodelay values
  ARM: dts: am57xx-idk: Remove support for voltage switching for SD card
  bus: ti-sysc: Handle devices with no control registers
  ARM: dts: Configure osc clock for d_can on am335x
  iio: imu: mpu6050: Fix FIFO layout for ICM20602
  lkdtm/bugs: Adjust recursion test to avoid elision
  lkdtm/usercopy: Moves the KERNEL_DS test to non-canonical
  iio: adc: ads124: avoid buffer overflow
  iio: adc: modify NPCM ADC read reference voltage

Change-Id: I98c823993370027391cc21dfb239c3049f025136
Signed-off-by: Raghavendra Rao Ananta <rananta@codeaurora.org>
2019-07-01 17:41:24 -07:00


// SPDX-License-Identifier: GPL-2.0-or-later
/************************************************************************
* Linux driver for *
* ICP vortex GmbH: GDT PCI Disk Array Controllers *
* Intel Corporation: Storage RAID Controllers *
* *
* gdth.c *
* Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner *
* Copyright (C) 2002-04 Intel Corporation *
* Copyright (C) 2003-06 Adaptec Inc. *
* <achim_leubner@adaptec.com> *
* *
* Additions/Fixes: *
* Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> *
* Johannes Dinner <johannes_dinner@adaptec.com> *
* *
* *
* Linux kernel 2.6.x supported *
* *
************************************************************************/
/* All GDT Disk Array Controllers are fully supported by this driver.
* This includes the PCI SCSI Disk Array Controllers and the
* PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
* list of all controller types.
*
* After the optional list of IRQ values, other possible
* command line options are:
* disable:Y disable driver
* disable:N enable driver
* reserve_mode:0 reserve no drives for the raw service
* reserve_mode:1 reserve all uninitialized, removable drives
* reserve_mode:2 reserve all uninitialized drives
* reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with
* h- controller no., b- channel no.,
* t- target ID, l- LUN
* reverse_scan:Y reverse scan order for PCI controllers
* reverse_scan:N scan PCI controllers like BIOS
* max_ids:x x - target ID count per channel (1..MAXID)
* rescan:Y rescan all channels/IDs
* rescan:N use all devices found until now
* hdr_channel:x x - number of virtual bus for host drives
* shared_access:Y disable driver reserve/release protocol to
* access a shared resource from several nodes,
* appropriate controller firmware required
* shared_access:N enable driver reserve/release protocol
* force_dma32:Y use only 32 bit DMA mode
* force_dma32:N use 64 bit DMA mode, if supported
*
* The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
* max_ids:127,rescan:N,hdr_channel:0,
* shared_access:Y,force_dma32:N".
* Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
*
* When loading the gdth driver as a module, the same options are available.
* You can set the IRQs with "IRQ=...". However, the syntax to specify the
* options changes slightly. You must replace all ',' between options
* with ' ' and all ':' with '=' and you must use
* '1' in place of 'Y' and '0' in place of 'N'.
*
* Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
* max_ids=127 rescan=0 hdr_channel=0 shared_access=0
* force_dma32=0"
* The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
*/
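/* For example, "reserve_list:0,1,2,0,0,1,3,0" above is read as h,b,t,l
* quadruples and reserves two drives: controller 0, channel 1, target 2,
* LUN 0 and controller 0, channel 1, target 3, LUN 0.
*/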
/* The meaning of the Scsi_Pointer members in this driver is as follows:
* ptr: Chaining
* this_residual: unused
* buffer: unused
* dma_handle: unused
* buffers_residual: unused
* Status: unused
* Message: unused
* have_data_in: unused
* sent_command: unused
* phase: unused
*/
/* statistics */
#define GDTH_STATISTICS
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "gdth.h"
static DEFINE_MUTEX(gdth_mutex);
static void gdth_delay(int milliseconds);
static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
static irqreturn_t gdth_interrupt(int irq, void *dev_id);
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex);
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
struct scsi_cmnd *scp);
static int gdth_async_event(gdth_ha_str *ha);
static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
static void gdth_next(gdth_ha_str *ha);
static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
u16 idx, gdth_evt_data *evt);
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
gdth_evt_str *estr);
static void gdth_clear_events(void);
static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count);
static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
u16 hdrive);
static void gdth_enable_int(gdth_ha_str *ha);
static int gdth_test_busy(gdth_ha_str *ha);
static int gdth_get_cmd_index(gdth_ha_str *ha);
static void gdth_release_event(gdth_ha_str *ha);
static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
u32 p1, u64 p2,u64 p3);
static int gdth_search_drives(gdth_ha_str *ha);
static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
static const char *gdth_ctr_name(gdth_ha_str *ha);
static int gdth_open(struct inode *inode, struct file *filep);
static int gdth_close(struct inode *inode, struct file *filep);
static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg);
static void gdth_flush(gdth_ha_str *ha);
static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
struct gdth_cmndinfo *cmndinfo);
static void gdth_scsi_done(struct scsi_cmnd *scp);
#ifdef DEBUG_GDTH
static u8 DebugState = DEBUG_GDTH;
#define TRACE(a) {if (DebugState==1) {printk a;}}
#define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
#define TRACE3(a) {if (DebugState!=0) {printk a;}}
#else /* !DEBUG */
#define TRACE(a)
#define TRACE2(a)
#define TRACE3(a)
#endif
#ifdef GDTH_STATISTICS
static u32 max_rq=0, max_index=0, max_sg=0;
static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
static struct timer_list gdth_timer;
#endif
#define PTR2USHORT(a) (u16)(unsigned long)(a) /* pointer-stored port address -> u16 I/O port */
#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b) /* offsetof(): byte offset of member b in struct a */
#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t)) /* bounds check: index i valid for table t */
#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b)) /* logical -> physical bus, skipping the virtual host-drive bus */
static u8 gdth_polling; /* polling if TRUE */
static int gdth_ctr_count = 0; /* controller count */
static LIST_HEAD(gdth_instances); /* controller list */
static u8 gdth_write_through = FALSE; /* write through */
static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
static int elastidx;
static int eoldidx;
static int major;
#define DIN 1 /* IN data direction */
#define DOU 2 /* OUT data direction */
#define DNO DIN /* no data transfer */
#define DUN DIN /* unknown data direction */
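/* Data direction per SCSI opcode (the opcode is the table index):
* e.g. entry 0x08 (READ(6)) is DIN and entry 0x0a (WRITE(6)) is DOU;
* unknown or vendor-specific opcodes default to DUN. */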
static u8 gdth_direction_tab[0x100] = {
DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
};
/* LILO and modprobe/insmod parameters */
/* disable driver flag */
static int disable __initdata = 0;
/* reserve flag */
static int reserve_mode = 1;
/* reserve list */
static int reserve_list[MAX_RES_ARGS] =
{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
/* scan order for PCI controllers */
static int reverse_scan = 0;
/* virtual channel for the host drives */
static int hdr_channel = 0;
/* max. IDs per channel */
static int max_ids = MAXID;
/* rescan all IDs */
static int rescan = 0;
/* shared access */
static int shared_access = 1;
/* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
static int force_dma32 = 0;
/* parameters for modprobe/insmod */
module_param(disable, int, 0);
module_param(reserve_mode, int, 0);
module_param_array(reserve_list, int, NULL, 0);
module_param(reverse_scan, int, 0);
module_param(hdr_channel, int, 0);
module_param(max_ids, int, 0);
module_param(rescan, int, 0);
module_param(shared_access, int, 0);
module_param(force_dma32, int, 0);
MODULE_AUTHOR("Achim Leubner");
MODULE_LICENSE("GPL");
/* ioctl interface */
static const struct file_operations gdth_fops = {
.unlocked_ioctl = gdth_unlocked_ioctl,
.open = gdth_open,
.release = gdth_close,
.llseek = noop_llseek,
};
#include "gdth_proc.h"
#include "gdth_proc.c"
static gdth_ha_str *gdth_find_ha(int hanum)
{
gdth_ha_str *ha;
list_for_each_entry(ha, &gdth_instances, list)
if (hanum == ha->hanum)
return ha;
return NULL;
}
static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
{
struct gdth_cmndinfo *priv = NULL;
unsigned long flags;
int i;
spin_lock_irqsave(&ha->smp_lock, flags);
for (i=0; i<GDTH_MAXCMDS; ++i) {
if (ha->cmndinfo[i].index == 0) {
priv = &ha->cmndinfo[i];
memset(priv, 0, sizeof(*priv));
priv->index = i+1;
break;
}
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
return priv;
}
static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv)
{
BUG_ON(!priv);
priv->index = 0;
}
static void gdth_delay(int milliseconds)
{
if (milliseconds == 0) {
udelay(1);
} else {
mdelay(milliseconds);
}
}
static void gdth_scsi_done(struct scsi_cmnd *scp)
{
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
int internal_command = cmndinfo->internal_command;
TRACE2(("gdth_scsi_done()\n"));
gdth_put_cmndinfo(cmndinfo);
scp->host_scribble = NULL;
if (internal_command)
complete((struct completion *)scp->request);
else
scp->scsi_done(scp);
}
int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
int timeout, u32 *info)
{
gdth_ha_str *ha = shost_priv(sdev->host);
struct scsi_cmnd *scp;
struct gdth_cmndinfo cmndinfo;
DECLARE_COMPLETION_ONSTACK(wait);
int rval;
scp = kzalloc(sizeof(*scp), GFP_KERNEL);
if (!scp)
return -ENOMEM;
scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
if (!scp->sense_buffer) {
kfree(scp);
return -ENOMEM;
}
scp->device = sdev;
memset(&cmndinfo, 0, sizeof(cmndinfo));
/* use request field to save the ptr. to completion struct. */
scp->request = (struct request *)&wait;
scp->cmd_len = 12;
scp->cmnd = cmnd;
cmndinfo.priority = IOCTL_PRI;
cmndinfo.internal_cmd_str = gdtcmd;
cmndinfo.internal_command = 1;
TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
__gdth_queuecommand(ha, scp, &cmndinfo);
wait_for_completion(&wait);
rval = cmndinfo.status;
if (info)
*info = cmndinfo.info;
kfree(scp->sense_buffer);
kfree(scp);
return rval;
}
int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
int timeout, u32 *info)
{
struct scsi_device *sdev = scsi_get_host_dev(shost);
int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
scsi_free_host_dev(sdev);
return rval;
}
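/* Map a host drive size (in sectors) to a BIOS-style C/H/S geometry:
* try the smallest head/sector layout first and fall back to a larger one
* whenever the resulting cylinder count exceeds MAXCYLS. Illustrative
* arithmetic only, assuming the 64*32, 127*63 and 255*63 layouts noted in
* the branch comments below: a drive of 2,097,152 sectors maps to
* 2097152/64/32 = 1024 cylinders under the first layout. */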
static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
{
*cyls = size /HEADS/SECS;
if (*cyls <= MAXCYLS) {
*heads = HEADS;
*secs = SECS;
} else { /* too high for 64*32 */
*cyls = size /MEDHEADS/MEDSECS;
if (*cyls <= MAXCYLS) {
*heads = MEDHEADS;
*secs = MEDSECS;
} else { /* too high for 127*63 */
*cyls = size /BIGHEADS/BIGSECS;
*heads = BIGHEADS;
*secs = BIGSECS;
}
}
}
static bool gdth_search_vortex(u16 device)
{
if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
return true;
if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
return true;
if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
return true;
return false;
}
static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
static int gdth_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void gdth_pci_remove_one(struct pci_dev *pdev);
static void gdth_remove_one(gdth_ha_str *ha);
/* Vortex only makes RAID controllers.
* We do not really want to specify all 550 ids here, so wildcard match.
*/
static const struct pci_device_id gdthtable[] = {
{ PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, gdthtable);
static struct pci_driver gdth_pci_driver = {
.name = "gdth",
.id_table = gdthtable,
.probe = gdth_pci_init_one,
.remove = gdth_pci_remove_one,
};
static void gdth_pci_remove_one(struct pci_dev *pdev)
{
gdth_ha_str *ha = pci_get_drvdata(pdev);
list_del(&ha->list);
gdth_remove_one(ha);
pci_disable_device(pdev);
}
static int gdth_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u16 vendor = pdev->vendor;
u16 device = pdev->device;
unsigned long base0, base1, base2;
int rc;
gdth_pci_str gdth_pcistr;
gdth_ha_str *ha = NULL;
TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
gdth_ctr_count, vendor, device));
memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
return -ENODEV;
rc = pci_enable_device(pdev);
if (rc)
return rc;
if (gdth_ctr_count >= MAXHA)
return -EBUSY;
/* GDT PCI controller found, resources are already in pdev */
gdth_pcistr.pdev = pdev;
base0 = pci_resource_flags(pdev, 0);
base1 = pci_resource_flags(pdev, 1);
base2 = pci_resource_flags(pdev, 2);
if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */
device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
if (!(base0 & IORESOURCE_MEM))
return -ENODEV;
gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
} else { /* GDT6110, GDT6120, .. */
if (!(base0 & IORESOURCE_MEM) ||
!(base2 & IORESOURCE_MEM) ||
!(base1 & IORESOURCE_IO))
return -ENODEV;
gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
gdth_pcistr.io = pci_resource_start(pdev, 1);
}
TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
gdth_pcistr.pdev->bus->number,
PCI_SLOT(gdth_pcistr.pdev->devfn),
gdth_pcistr.irq,
gdth_pcistr.dpmem));
rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
if (rc)
return rc;
return 0;
}
static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
gdth_ha_str *ha)
{
register gdt6_dpram_str __iomem *dp6_ptr;
register gdt6c_dpram_str __iomem *dp6c_ptr;
register gdt6m_dpram_str __iomem *dp6m_ptr;
u32 retries;
u8 prot_ver;
u16 command;
int i, found = FALSE;
TRACE(("gdth_init_pci()\n"));
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
ha->oem_id = OEM_ID_INTEL;
else
ha->oem_id = OEM_ID_ICP;
ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
ha->stype = (u32)pdev->device;
ha->irq = pdev->irq;
ha->pdev = pdev;
if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */
TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
/* check and reset interface area */
dp6_ptr = ha->brd;
writel(DPMEM_MAGIC, &dp6_ptr->u);
if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
pcistr->dpmem);
found = FALSE;
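/* DPMEM is not usable at the BIOS-assigned address (possibly shadowed);
* scan the legacy 0xC8000-0xE8000 window in 16 KiB steps for a free
* slot and re-program BAR0 to the first address that responds. */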
for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
iounmap(ha->brd);
ha->brd = ioremap(i, sizeof(u16));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
if (readw(ha->brd) != 0xffff) {
TRACE2(("init_pci_old() address 0x%x busy\n", i));
continue;
}
iounmap(ha->brd);
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
dp6_ptr = ha->brd;
writel(DPMEM_MAGIC, &dp6_ptr->u);
if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
printk("GDT-PCI: Use free address at 0x%x\n", i);
found = TRUE;
break;
}
}
if (!found) {
printk("GDT-PCI: No free address found!\n");
iounmap(ha->brd);
return 0;
}
}
memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
if (readl(&dp6_ptr->u) != 0) {
printk("GDT-PCI: Initialization error (DPMEM write error)\n");
iounmap(ha->brd);
return 0;
}
/* disable board interrupts, deinit services */
writeb(0xff, &dp6_ptr->io.irqdel);
writeb(0x00, &dp6_ptr->io.irqen);
writeb(0x00, &dp6_ptr->u.ic.S_Status);
writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);
writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
writeb(0, &dp6_ptr->io.event);
retries = INIT_RETRIES;
gdth_delay(20);
while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
if (--retries == 0) {
printk("GDT-PCI: Initialization error (DEINIT failed)\n");
iounmap(ha->brd);
return 0;
}
gdth_delay(1);
}
prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
writeb(0, &dp6_ptr->u.ic.S_Status);
writeb(0xff, &dp6_ptr->io.irqdel);
if (prot_ver != PROTOCOL_VERSION) {
printk("GDT-PCI: Illegal protocol version\n");
iounmap(ha->brd);
return 0;
}
ha->type = GDT_PCI;
ha->ic_all_size = sizeof(dp6_ptr->u);
/* special command to controller BIOS */
writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
writeb(0, &dp6_ptr->io.event);
retries = INIT_RETRIES;
gdth_delay(20);
while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
if (--retries == 0) {
printk("GDT-PCI: Initialization error\n");
iounmap(ha->brd);
return 0;
}
gdth_delay(1);
}
writeb(0, &dp6_ptr->u.ic.S_Status);
writeb(0xff, &dp6_ptr->io.irqdel);
ha->dma64_support = 0;
} else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
ha->plx = (gdt6c_plx_regs *)pcistr->io;
TRACE2(("init_pci_new() dpmem %lx irq %d\n",
pcistr->dpmem,ha->irq));
ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
iounmap(ha->brd);
return 0;
}
/* check and reset interface area */
dp6c_ptr = ha->brd;
writel(DPMEM_MAGIC, &dp6c_ptr->u);
if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
pcistr->dpmem);
found = FALSE;
for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
iounmap(ha->brd);
ha->brd = ioremap(i, sizeof(u16));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
if (readw(ha->brd) != 0xffff) {
TRACE2(("init_pci_plx() address 0x%x busy\n", i));
continue;
}
iounmap(ha->brd);
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
dp6c_ptr = ha->brd;
writel(DPMEM_MAGIC, &dp6c_ptr->u);
if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
printk("GDT-PCI: Use free address at 0x%x\n", i);
found = TRUE;
break;
}
}
if (!found) {
printk("GDT-PCI: No free address found!\n");
iounmap(ha->brd);
return 0;
}
}
memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
if (readl(&dp6c_ptr->u) != 0) {
printk("GDT-PCI: Initialization error (DPMEM write error)\n");
iounmap(ha->brd);
return 0;
}
/* disable board interrupts, deinit services */
outb(0x00,PTR2USHORT(&ha->plx->control1));
outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
writeb(0x00, &dp6c_ptr->u.ic.S_Status);
writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);
writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
retries = INIT_RETRIES;
gdth_delay(20);
while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
if (--retries == 0) {
printk("GDT-PCI: Initialization error (DEINIT failed)\n");
iounmap(ha->brd);
return 0;
}
gdth_delay(1);
}
prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
writeb(0, &dp6c_ptr->u.ic.Status);
if (prot_ver != PROTOCOL_VERSION) {
printk("GDT-PCI: Illegal protocol version\n");
iounmap(ha->brd);
return 0;
}
ha->type = GDT_PCINEW;
ha->ic_all_size = sizeof(dp6c_ptr->u);
/* special command to controller BIOS */
writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
retries = INIT_RETRIES;
gdth_delay(20);
while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
if (--retries == 0) {
printk("GDT-PCI: Initialization error\n");
iounmap(ha->brd);
return 0;
}
gdth_delay(1);
}
writeb(0, &dp6c_ptr->u.ic.S_Status);
ha->dma64_support = 0;
} else { /* MPR */
TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
/* manipulate config. space to enable DPMEM, start RP controller */
pci_read_config_word(pdev, PCI_COMMAND, &command);
command |= 6; /* set Memory Space Enable and Bus Master Enable */
pci_write_config_word(pdev, PCI_COMMAND, command);
gdth_delay(1);
dp6m_ptr = ha->brd;
/* Ensure that it is safe to access the non-HW portions of DPMEM.
* Additional check needed for Xscale-based RAID controllers */
while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
gdth_delay(1);
/* check and reset interface area */
writel(DPMEM_MAGIC, &dp6m_ptr->u);
if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
pcistr->dpmem);
found = FALSE;
for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
iounmap(ha->brd);
ha->brd = ioremap(i, sizeof(u16));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
if (readw(ha->brd) != 0xffff) {
TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
continue;
}
iounmap(ha->brd);
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
}
dp6m_ptr = ha->brd;
writel(DPMEM_MAGIC, &dp6m_ptr->u);
if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
printk("GDT-PCI: Use free address at 0x%x\n", i);
found = TRUE;
break;
}
}
if (!found) {
printk("GDT-PCI: No free address found!\n");
iounmap(ha->brd);
return 0;
}
}
memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));
/* disable board interrupts, deinit services */
writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
&dp6m_ptr->i960r.edoor_en_reg);
writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
writeb(0x00, &dp6m_ptr->u.ic.S_Status);
writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);
writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
writeb(1, &dp6m_ptr->i960r.ldoor_reg);
retries = INIT_RETRIES;
gdth_delay(20);
while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
if (--retries == 0) {
printk("GDT-PCI: Initialization error (DEINIT failed)\n");
iounmap(ha->brd);
return 0;
}
gdth_delay(1);
}
prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
writeb(0, &dp6m_ptr->u.ic.S_Status);
if (prot_ver != PROTOCOL_VERSION) {
printk("GDT-PCI: Illegal protocol version\n");
iounmap(ha->brd);
return 0;
}
ha->type = GDT_PCIMPR;
ha->ic_all_size = sizeof(dp6m_ptr->u);
/* special command to controller BIOS */
writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
writeb(1, &dp6m_ptr->i960r.ldoor_reg);
retries = INIT_RETRIES;
gdth_delay(20);
while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
if (--retries == 0) {
printk("GDT-PCI: Initialization error\n");
iounmap(ha->brd);
return 0;
}
gdth_delay(1);
}
writeb(0, &dp6m_ptr->u.ic.S_Status);
/* read FW version to detect 64-bit DMA support */
writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
writeb(1, &dp6m_ptr->i960r.ldoor_reg);
retries = INIT_RETRIES;
gdth_delay(20);
while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
if (--retries == 0) {
printk("GDT-PCI: Initialization error (DEINIT failed)\n");
iounmap(ha->brd);
return 0;
}
gdth_delay(1);
}
prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
writeb(0, &dp6m_ptr->u.ic.S_Status);
if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */
ha->dma64_support = 0;
else
ha->dma64_support = 1;
}
return 1;
}
/* controller protocol functions */
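/* Command submission sequence (as used by gdth_internal_cmd() below and
* the normal request path): reserve a command slot with
* gdth_get_cmd_index(), raise the semaphore with gdth_set_sema0(), copy
* the built command into DPMEM with gdth_copy_command(), ring the
* controller doorbell in gdth_release_event(), then either poll for
* completion with gdth_wait() or let the interrupt handler pick up the
* result. */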
static void gdth_enable_int(gdth_ha_str *ha)
{
unsigned long flags;
gdt6_dpram_str __iomem *dp6_ptr;
gdt6m_dpram_str __iomem *dp6m_ptr;
TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
spin_lock_irqsave(&ha->smp_lock, flags);
if (ha->type == GDT_PCI) {
dp6_ptr = ha->brd;
writeb(1, &dp6_ptr->io.irqdel);
writeb(0, &dp6_ptr->u.ic.Cmd_Index);
writeb(1, &dp6_ptr->io.irqen);
} else if (ha->type == GDT_PCINEW) {
outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
outb(0x03, PTR2USHORT(&ha->plx->control1));
} else if (ha->type == GDT_PCIMPR) {
dp6m_ptr = ha->brd;
writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
&dp6m_ptr->i960r.edoor_en_reg);
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
/* return IStatus if interrupt was from this card else 0 */
static u8 gdth_get_status(gdth_ha_str *ha)
{
u8 IStatus = 0;
TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
if (ha->type == GDT_PCI)
IStatus =
readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
else if (ha->type == GDT_PCINEW)
IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
else if (ha->type == GDT_PCIMPR)
IStatus =
readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
return IStatus;
}
static int gdth_test_busy(gdth_ha_str *ha)
{
register int gdtsema0 = 0;
TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
if (ha->type == GDT_PCI)
gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
else if (ha->type == GDT_PCINEW)
gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
else if (ha->type == GDT_PCIMPR)
gdtsema0 =
(int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
return (gdtsema0 & 1);
}
static int gdth_get_cmd_index(gdth_ha_str *ha)
{
int i;
TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));
for (i=0; i<GDTH_MAXCMDS; ++i) {
if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
ha->cmd_tab[i].service = ha->pccb->Service;
ha->pccb->CommandIndex = (u32)i+2;
return (i+2);
}
}
return 0;
}
static void gdth_set_sema0(gdth_ha_str *ha)
{
TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
if (ha->type == GDT_PCI) {
writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
} else if (ha->type == GDT_PCINEW) {
outb(1, PTR2USHORT(&ha->plx->sema0_reg));
} else if (ha->type == GDT_PCIMPR) {
writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
}
}
static void gdth_copy_command(gdth_ha_str *ha)
{
register gdth_cmd_str *cmd_ptr;
register gdt6m_dpram_str __iomem *dp6m_ptr;
register gdt6c_dpram_str __iomem *dp6c_ptr;
gdt6_dpram_str __iomem *dp6_ptr;
u16 cp_count,dp_offset,cmd_no;
TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
cp_count = ha->cmd_len;
dp_offset= ha->cmd_offs_dpmem;
cmd_no = ha->cmd_cnt;
cmd_ptr = ha->pccb;
++ha->cmd_cnt;
/* round cp_count up to the next dword (4-byte) boundary */
if (cp_count & 3)
cp_count += (4 - (cp_count & 3));
ha->cmd_offs_dpmem += cp_count;
/* set offset and service, copy command to DPMEM */
if (ha->type == GDT_PCI) {
dp6_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp6_ptr->u.ic.comm_queue[cmd_no].offset);
writew((u16)cmd_ptr->Service,
&dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
} else if (ha->type == GDT_PCINEW) {
dp6c_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
writew((u16)cmd_ptr->Service,
&dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
} else if (ha->type == GDT_PCIMPR) {
dp6m_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
writew((u16)cmd_ptr->Service,
&dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
}
}
static void gdth_release_event(gdth_ha_str *ha)
{
TRACE(("gdth_release_event() hanum %d\n", ha->hanum));
#ifdef GDTH_STATISTICS
{
u32 i,j;
for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
++i;
}
if (max_index < i) {
max_index = i;
TRACE3(("GDT: max_index = %d\n",(u16)i));
}
}
#endif
if (ha->pccb->OpCode == GDT_INIT)
ha->pccb->Service |= 0x80;
if (ha->type == GDT_PCI) {
writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
} else if (ha->type == GDT_PCINEW) {
outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
} else if (ha->type == GDT_PCIMPR) {
writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
}
}
static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
{
int answer_found = FALSE;
int wait_index = 0;
TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));
if (index == 0)
return 1; /* no wait required */
do {
__gdth_interrupt(ha, true, &wait_index);
if (wait_index == index) {
answer_found = TRUE;
break;
}
gdth_delay(1);
} while (--time);
while (gdth_test_busy(ha))
gdth_delay(0);
return (answer_found);
}
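/* Issue a single firmware command synchronously. The meaning of p1..p3
* depends on the service and opcode (e.g. IOCTL subfunction/channel/size
* for CACHESERVICE GDT_IOCTL, or direction/bus/target+LUN for
* SCSIRAWSERVICE). The command is retried while the controller reports
* S_BSY; the routine returns 1 on S_OK and 0 otherwise. */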
static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
u32 p1, u64 p2, u64 p3)
{
register gdth_cmd_str *cmd_ptr;
int retries,index;
TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));
cmd_ptr = ha->pccb;
memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));
/* make command */
for (retries = INIT_RETRIES;;) {
cmd_ptr->Service = service;
cmd_ptr->RequestBuffer = INTERNAL_CMND;
if (!(index=gdth_get_cmd_index(ha))) {
TRACE(("GDT: No free command index found\n"));
return 0;
}
gdth_set_sema0(ha);
cmd_ptr->OpCode = opcode;
cmd_ptr->BoardNode = LOCALBOARD;
if (service == CACHESERVICE) {
if (opcode == GDT_IOCTL) {
cmd_ptr->u.ioctl.subfunc = p1;
cmd_ptr->u.ioctl.channel = (u32)p2;
cmd_ptr->u.ioctl.param_size = (u16)p3;
cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
} else {
if (ha->cache_feat & GDT_64BIT) {
cmd_ptr->u.cache64.DeviceNo = (u16)p1;
cmd_ptr->u.cache64.BlockNo = p2;
} else {
cmd_ptr->u.cache.DeviceNo = (u16)p1;
cmd_ptr->u.cache.BlockNo = (u32)p2;
}
}
} else if (service == SCSIRAWSERVICE) {
if (ha->raw_feat & GDT_64BIT) {
cmd_ptr->u.raw64.direction = p1;
cmd_ptr->u.raw64.bus = (u8)p2;
cmd_ptr->u.raw64.target = (u8)p3;
cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
} else {
cmd_ptr->u.raw.direction = p1;
cmd_ptr->u.raw.bus = (u8)p2;
cmd_ptr->u.raw.target = (u8)p3;
cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
}
} else if (service == SCREENSERVICE) {
if (opcode == GDT_REALTIME) {
*(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
*(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
*(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
}
}
ha->cmd_len = sizeof(gdth_cmd_str);
ha->cmd_offs_dpmem = 0;
ha->cmd_cnt = 0;
gdth_copy_command(ha);
gdth_release_event(ha);
gdth_delay(20);
if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
printk("GDT: Initialization error (timeout service %d)\n",service);
return 0;
}
if (ha->status != S_BSY || --retries == 0)
break;
gdth_delay(1);
}
return (ha->status != S_OK ? 0:1);
}
/* search for devices */
static int gdth_search_drives(gdth_ha_str *ha)
{
u16 cdev_cnt, i;
int ok;
u32 bus_no, drv_cnt, drv_no, j;
gdth_getch_str *chn;
gdth_drlist_str *drl;
gdth_iochan_str *ioc;
gdth_raw_iochan_str *iocr;
gdth_arcdl_str *alst;
gdth_alist_str *alst2;
gdth_oem_str_ioctl *oemstr;
TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
ok = 0;
/* initialize controller services, at first: screen service */
ha->screen_feat = 0;
if (!force_dma32) {
ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0);
if (ok)
ha->screen_feat = GDT_64BIT;
}
if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
if (!ok) {
printk("GDT-HA %d: Initialization error screen service (code %d)\n",
ha->hanum, ha->status);
return 0;
}
TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
/* unfreeze all IOs */
gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);
/* initialize cache service */
ha->cache_feat = 0;
if (!force_dma32) {
ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS,
0, 0);
if (ok)
ha->cache_feat = GDT_64BIT;
}
if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
if (!ok) {
printk("GDT-HA %d: Initialization error cache service (code %d)\n",
ha->hanum, ha->status);
return 0;
}
TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
cdev_cnt = (u16)ha->info;
ha->fw_vers = ha->service;
/* detect number of buses - try new IOCTL */
iocr = (gdth_raw_iochan_str *)ha->pscratch;
iocr->hdr.version = 0xffffffff;
iocr->hdr.list_entries = MAXBUS;
iocr->hdr.first_chan = 0;
iocr->hdr.last_chan = MAXBUS-1;
iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC,
INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
TRACE2(("IOCHAN_RAW_DESC supported!\n"));
ha->bus_cnt = iocr->hdr.chan_count;
for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
if (iocr->list[bus_no].proc_id < MAXID)
ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
else
ha->bus_id[bus_no] = 0xff;
}
} else {
/* old method */
chn = (gdth_getch_str *)ha->pscratch;
for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
chn->channel_no = bus_no;
if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
SCSI_CHAN_CNT | L_CTRL_PATTERN,
IO_CHANNEL | INVALID_CHANNEL,
sizeof(gdth_getch_str))) {
if (bus_no == 0) {
printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
ha->hanum, ha->status);
return 0;
}
break;
}
if (chn->siop_id < MAXID)
ha->bus_id[bus_no] = chn->siop_id;
else
ha->bus_id[bus_no] = 0xff;
}
ha->bus_cnt = (u8)bus_no;
}
TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
/* read cache configuration */
if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO,
INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
printk("GDT-HA %d: Initialization error cache service (code %d)\n",
ha->hanum, ha->status);
return 0;
}
ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
ha->cpar.write_back,ha->cpar.block_size));
/* read board info and features */
ha->more_proc = FALSE;
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO,
INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
sizeof(gdth_binfo_str));
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES,
INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
ha->more_proc = TRUE;
}
} else {
TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
strcpy(ha->binfo.type_string, gdth_ctr_name(ha));
}
TRACE2(("Controller name: %s\n",ha->binfo.type_string));
/* read more information */
if (ha->more_proc) {
/* physical drives, channel addresses */
ioc = (gdth_iochan_str *)ha->pscratch;
ioc->hdr.version = 0xffffffff;
ioc->hdr.list_entries = MAXBUS;
ioc->hdr.first_chan = 0;
ioc->hdr.last_chan = MAXBUS-1;
ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC,
INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
ha->raw[bus_no].address = ioc->list[bus_no].address;
ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
}
} else {
for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
ha->raw[bus_no].address = IO_CHANNEL;
ha->raw[bus_no].local_no = bus_no;
}
}
for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
chn = (gdth_getch_str *)ha->pscratch;
chn->channel_no = ha->raw[bus_no].local_no;
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
SCSI_CHAN_CNT | L_CTRL_PATTERN,
ha->raw[bus_no].address | INVALID_CHANNEL,
sizeof(gdth_getch_str))) {
ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
TRACE2(("Channel %d: %d phys. drives\n",
bus_no,chn->drive_cnt));
}
if (ha->raw[bus_no].pdev_cnt > 0) {
drl = (gdth_drlist_str *)ha->pscratch;
drl->sc_no = ha->raw[bus_no].local_no;
drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
SCSI_DR_LIST | L_CTRL_PATTERN,
ha->raw[bus_no].address | INVALID_CHANNEL,
sizeof(gdth_drlist_str))) {
for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
ha->raw[bus_no].id_list[j] = drl->sc_list[j];
} else {
ha->raw[bus_no].pdev_cnt = 0;
}
}
}
/* logical drives */
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
INVALID_CHANNEL,sizeof(u32))) {
drv_cnt = *(u32 *)ha->pscratch;
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
for (j = 0; j < drv_cnt; ++j) {
drv_no = ((u32 *)ha->pscratch)[j];
if (drv_no < MAX_LDRIVES) {
ha->hdr[drv_no].is_logdrv = TRUE;
TRACE2(("Drive %d is log. drive\n",drv_no));
}
}
}
alst = (gdth_arcdl_str *)ha->pscratch;
alst->entries_avail = MAX_LDRIVES;
alst->first_entry = 0;
alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
(alst->entries_avail-1) * sizeof(gdth_alist_str))) {
for (j = 0; j < alst->entries_init; ++j) {
ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
ha->hdr[j].is_master = alst->list[j].is_master;
ha->hdr[j].is_parity = alst->list[j].is_parity;
ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
ha->hdr[j].master_no = alst->list[j].cd_handle;
}
} else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
ARRAY_DRV_LIST | LA_CTRL_PATTERN,
0, 35 * sizeof(gdth_alist_str))) {
for (j = 0; j < 35; ++j) {
alst2 = &((gdth_alist_str *)ha->pscratch)[j];
ha->hdr[j].is_arraydrv = alst2->is_arrayd;
ha->hdr[j].is_master = alst2->is_master;
ha->hdr[j].is_parity = alst2->is_parity;
ha->hdr[j].is_hotfix = alst2->is_hotfix;
ha->hdr[j].master_no = alst2->cd_handle;
}
}
}
}
/* initialize raw service */
ha->raw_feat = 0;
if (!force_dma32) {
ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0);
if (ok)
ha->raw_feat = GDT_64BIT;
}
if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
if (!ok) {
printk("GDT-HA %d: Initialization error raw service (code %d)\n",
ha->hanum, ha->status);
return 0;
}
TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
/* set/get features raw service (scatter/gather) */
if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER,
0, 0)) {
TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
ha->info));
ha->raw_feat |= (u16)ha->info;
}
}
/* set/get features cache service (equal to raw service) */
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0,
SCATTER_GATHER,0)) {
TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
ha->info));
ha->cache_feat |= (u16)ha->info;
}
}
/* reserve drives for raw service */
if (reserve_mode != 0) {
gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL,
reserve_mode == 1 ? 1 : 3, 0, 0);
TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
ha->status));
}
for (i = 0; i < MAX_RES_ARGS; i += 4) {
if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt &&
reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
reserve_list[i], reserve_list[i+1],
reserve_list[i+2], reserve_list[i+3]));
if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0,
reserve_list[i+1], reserve_list[i+2] |
(reserve_list[i+3] << 8))) {
printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
ha->hanum, ha->status);
}
}
}
/* Determine OEM string using IOCTL */
oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
oemstr->params.ctl_version = 0x01;
oemstr->params.buffer_size = sizeof(oemstr->text);
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
sizeof(gdth_oem_str_ioctl))) {
TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
printk("GDT-HA %d: Vendor: %s Name: %s\n",
ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string);
/* Save the Host Drive inquiry data */
strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
sizeof(ha->oem_name));
} else {
/* Old method, based on PCI ID */
TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
printk("GDT-HA %d: Name: %s\n",
ha->hanum, ha->binfo.type_string);
if (ha->oem_id == OEM_ID_INTEL)
strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name));
else
strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name));
}
/* scanning for host drives */
for (i = 0; i < cdev_cnt; ++i)
gdth_analyse_hdrive(ha, i);
TRACE(("gdth_search_drives() OK\n"));
return 1;
}
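/*
 * gdth_analyse_hdrive() - query one host drive via the cache service:
 * read its size, derive or read the heads/sectors geometry, and fetch
 * device type, cluster and read/write attribute information.
 * Returns 1 if the drive is present and usable.
 */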
static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
{
u32 drv_cyls;
int drv_hds, drv_secs;
TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
if (hdrive >= MAX_HDRIVES)
return 0;
if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
return 0;
ha->hdr[hdrive].present = TRUE;
ha->hdr[hdrive].size = ha->info;
/* evaluate mapping (sectors per head, heads per cylinder) */
ha->hdr[hdrive].size &= ~SECS32;
if (ha->info2 == 0) {
gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
} else {
drv_hds = ha->info2 & 0xff;
drv_secs = (ha->info2 >> 8) & 0xff;
drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
}
ha->hdr[hdrive].heads = (u8)drv_hds;
ha->hdr[hdrive].secs = (u8)drv_secs;
/* round size */
ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
if (ha->cache_feat & GDT_64BIT) {
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
&& ha->info2 != 0) {
ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
}
}
TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));
/* get information about the device */
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
hdrive,ha->info));
ha->hdr[hdrive].devtype = (u16)ha->info;
}
/* cluster info */
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
hdrive,ha->info));
if (!shared_access)
ha->hdr[hdrive].cluster_type = (u8)ha->info;
}
/* R/W attributes */
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
hdrive,ha->info));
ha->hdr[hdrive].rw_attribs = (u8)ha->info;
}
return 1;
}
/* command queueing/sending functions */
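/*
 * gdth_putq() - insert a SCSI command into the adapter's request list,
 * ordered by priority (0 = highest, 0xff = lowest); the list is linked
 * through scp->SCp.ptr and protected by ha->smp_lock.
 */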
static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
{
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
register struct scsi_cmnd *pscp;
register struct scsi_cmnd *nscp;
unsigned long flags;
TRACE(("gdth_putq() priority %d\n",priority));
spin_lock_irqsave(&ha->smp_lock, flags);
if (!cmndinfo->internal_command)
cmndinfo->priority = priority;
if (ha->req_first==NULL) {
ha->req_first = scp; /* queue was empty */
scp->SCp.ptr = NULL;
} else { /* queue not empty */
pscp = ha->req_first;
nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
/* priority: 0-highest,..,0xff-lowest */
while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
pscp = nscp;
nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
}
pscp->SCp.ptr = (char *)scp;
scp->SCp.ptr = (char *)nscp;
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
#ifdef GDTH_STATISTICS
flags = 0;
for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++flags;
if (max_rq < flags) {
max_rq = flags;
TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
}
#endif
}
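/*
 * gdth_next() - main command dispatcher: walk the request list and
 * start as many commands as fit into the controller's DPMEM in one
 * batch.  Simple SCSI commands for host drives are answered inside the
 * driver; the rest are translated into cache or raw service firmware
 * commands.  In polling mode the routine also waits for the last
 * started command to complete.
 */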
static void gdth_next(gdth_ha_str *ha)
{
register struct scsi_cmnd *pscp;
register struct scsi_cmnd *nscp;
u8 b, t, l, firsttime;
u8 this_cmd, next_cmd;
unsigned long flags = 0;
int cmd_index;
TRACE(("gdth_next() hanum %d\n", ha->hanum));
if (!gdth_polling)
spin_lock_irqsave(&ha->smp_lock, flags);
ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
this_cmd = firsttime = TRUE;
next_cmd = gdth_polling ? FALSE:TRUE;
cmd_index = 0;
for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
if (!nscp_cmndinfo->internal_command) {
b = nscp->device->channel;
t = nscp->device->id;
l = nscp->device->lun;
if (nscp_cmndinfo->priority >= DEFAULT_PRI) {
if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
(b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
continue;
}
} else
b = t = l = 0;
if (firsttime) {
if (gdth_test_busy(ha)) { /* controller busy ? */
TRACE(("gdth_next() controller %d busy !\n", ha->hanum));
if (!gdth_polling) {
spin_unlock_irqrestore(&ha->smp_lock, flags);
return;
}
while (gdth_test_busy(ha))
gdth_delay(1);
}
firsttime = FALSE;
}
if (!nscp_cmndinfo->internal_command) {
if (nscp_cmndinfo->phase == -1) {
nscp_cmndinfo->phase = CACHESERVICE; /* default: cache svc. */
if (nscp->cmnd[0] == TEST_UNIT_READY) {
TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
b, t, l));
/* TEST_UNIT_READY -> set scan mode */
if ((ha->scan_mode & 0x0f) == 0) {
if (b == 0 && t == 0 && l == 0) {
ha->scan_mode |= 1;
TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
}
} else if ((ha->scan_mode & 0x0f) == 1) {
if (b == 0 && ((t == 0 && l == 1) ||
(t == 1 && l == 0))) {
nscp_cmndinfo->OpCode = GDT_SCAN_START;
nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
| SCSIRAWSERVICE;
ha->scan_mode = 0x12;
TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
ha->scan_mode));
} else {
ha->scan_mode &= 0x10;
TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
}
} else if (ha->scan_mode == 0x12) {
if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
nscp_cmndinfo->phase = SCSIRAWSERVICE;
nscp_cmndinfo->OpCode = GDT_SCAN_END;
ha->scan_mode &= 0x10;
TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
ha->scan_mode));
}
}
}
if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
(ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
/* always GDT_CLUST_INFO! */
nscp_cmndinfo->OpCode = GDT_CLUST_INFO;
}
}
}
if (nscp_cmndinfo->OpCode != -1) {
if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) {
if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
this_cmd = FALSE;
next_cmd = FALSE;
} else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) {
if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
this_cmd = FALSE;
next_cmd = FALSE;
} else {
memset((char*)nscp->sense_buffer,0,16);
nscp->sense_buffer[0] = 0x70;
nscp->sense_buffer[2] = NOT_READY;
nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (!nscp_cmndinfo->wait_for_completion)
nscp_cmndinfo->wait_for_completion++;
else
gdth_scsi_done(nscp);
}
} else if (gdth_cmnd_priv(nscp)->internal_command) {
if (!(cmd_index=gdth_special_cmd(ha, nscp)))
this_cmd = FALSE;
next_cmd = FALSE;
} else if (b != ha->virt_bus) {
if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
this_cmd = FALSE;
else
ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
} else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
nscp->cmnd[0], b, t, l));
nscp->result = DID_BAD_TARGET << 16;
if (!nscp_cmndinfo->wait_for_completion)
nscp_cmndinfo->wait_for_completion++;
else
gdth_scsi_done(nscp);
} else {
switch (nscp->cmnd[0]) {
case TEST_UNIT_READY:
case INQUIRY:
case REQUEST_SENSE:
case READ_CAPACITY:
case VERIFY:
case START_STOP:
case MODE_SENSE:
case SERVICE_ACTION_IN_16:
TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
nscp->cmnd[4],nscp->cmnd[5]));
if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
/* return UNIT_ATTENTION */
TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
nscp->cmnd[0], t));
ha->hdr[t].media_changed = FALSE;
memset((char*)nscp->sense_buffer,0,16);
nscp->sense_buffer[0] = 0x70;
nscp->sense_buffer[2] = UNIT_ATTENTION;
nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (!nscp_cmndinfo->wait_for_completion)
nscp_cmndinfo->wait_for_completion++;
else
gdth_scsi_done(nscp);
} else if (gdth_internal_cache_cmd(ha, nscp))
gdth_scsi_done(nscp);
break;
case ALLOW_MEDIUM_REMOVAL:
TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
nscp->cmnd[4],nscp->cmnd[5]));
if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
TRACE(("Prevent r. nonremov. drive->do nothing\n"));
nscp->result = DID_OK << 16;
nscp->sense_buffer[0] = 0;
if (!nscp_cmndinfo->wait_for_completion)
nscp_cmndinfo->wait_for_completion++;
else
gdth_scsi_done(nscp);
} else {
nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
TRACE(("Prevent/allow r. %d rem. drive %d\n",
nscp->cmnd[4],nscp->cmnd[3]));
if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
this_cmd = FALSE;
}
break;
case RESERVE:
case RELEASE:
TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
"RESERVE" : "RELEASE"));
if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
this_cmd = FALSE;
break;
case READ_6:
case WRITE_6:
case READ_10:
case WRITE_10:
case READ_16:
case WRITE_16:
if (ha->hdr[t].media_changed) {
/* return UNIT_ATTENTION */
TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
nscp->cmnd[0], t));
ha->hdr[t].media_changed = FALSE;
memset((char*)nscp->sense_buffer,0,16);
nscp->sense_buffer[0] = 0x70;
nscp->sense_buffer[2] = UNIT_ATTENTION;
nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (!nscp_cmndinfo->wait_for_completion)
nscp_cmndinfo->wait_for_completion++;
else
gdth_scsi_done(nscp);
} else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
this_cmd = FALSE;
break;
default:
TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
nscp->cmnd[4],nscp->cmnd[5]));
printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
ha->hanum, nscp->cmnd[0]);
nscp->result = DID_ABORT << 16;
if (!nscp_cmndinfo->wait_for_completion)
nscp_cmndinfo->wait_for_completion++;
else
gdth_scsi_done(nscp);
break;
}
}
if (!this_cmd)
break;
if (nscp == ha->req_first)
ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
else
pscp->SCp.ptr = nscp->SCp.ptr;
if (!next_cmd)
break;
}
if (ha->cmd_cnt > 0) {
gdth_release_event(ha);
}
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
if (gdth_polling && ha->cmd_cnt > 0) {
if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
printk("GDT-HA %d: Command %d timed out !\n",
ha->hanum, cmd_index);
}
}
/*
 * gdth_copy_internal_data() - copy a flat driver buffer into a
 * scsi_cmnd's scatter-gather buffers, mapping pages with kmap_atomic()
 * as needed.
 */
static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count)
{
u16 cpcount,i, max_sg = scsi_sg_count(scp);
u16 cpsum,cpnow;
struct scatterlist *sl;
char *address;
cpcount = min_t(u16, count, scsi_bufflen(scp));
if (cpcount) {
cpsum=0;
scsi_for_each_sg(scp, sl, max_sg, i) {
unsigned long flags;
cpnow = (u16)sl->length;
TRACE(("copy_internal() now %d sum %d count %d %d\n",
cpnow, cpsum, cpcount, scsi_bufflen(scp)));
if (cpsum+cpnow > cpcount)
cpnow = cpcount - cpsum;
cpsum += cpnow;
if (!sg_page(sl)) {
printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
ha->hanum);
return;
}
local_irq_save(flags);
address = kmap_atomic(sg_page(sl)) + sl->offset;
memcpy(address, buffer, cpnow);
flush_dcache_page(sg_page(sl));
kunmap_atomic(address);
local_irq_restore(flags);
if (cpsum == cpcount)
break;
buffer += cpnow;
}
} else if (count) {
printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
ha->hanum);
WARN_ON(1);
}
}
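/*
 * gdth_internal_cache_cmd() - answer simple SCSI commands (TEST UNIT
 * READY, INQUIRY, REQUEST SENSE, MODE SENSE, READ CAPACITY, ...) for
 * host drives directly in the driver, without sending anything to the
 * firmware.  Returns 1 if the caller may complete the command now.
 */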
static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
u8 t;
gdth_inq_data inq;
gdth_rdcap_data rdc;
gdth_sense_data sd;
gdth_modep_data mpd;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
t = scp->device->id;
TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
scp->cmnd[0],t));
scp->result = DID_OK << 16;
scp->sense_buffer[0] = 0;
switch (scp->cmnd[0]) {
case TEST_UNIT_READY:
case VERIFY:
case START_STOP:
TRACE2(("Test/Verify/Start hdrive %d\n",t));
break;
case INQUIRY:
TRACE2(("Inquiry hdrive %d devtype %d\n",
t,ha->hdr[t].devtype));
inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
/* all disks could be reported as removable here, if a flush via the
ALLOW_MEDIUM_REMOVAL command is desired */
inq.modif_rmb = 0x00;
if ((ha->hdr[t].devtype & 1) ||
(ha->hdr[t].cluster_type & CLUSTER_DRIVE))
inq.modif_rmb = 0x80;
inq.version = 2;
inq.resp_aenc = 2;
inq.add_length= 32;
strcpy(inq.vendor,ha->oem_name);
snprintf(inq.product, sizeof(inq.product), "Host Drive #%02d",t);
strcpy(inq.revision," ");
gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
break;
case REQUEST_SENSE:
TRACE2(("Request sense hdrive %d\n",t));
sd.errorcode = 0x70;
sd.segno = 0x00;
sd.key = NO_SENSE;
sd.info = 0;
sd.add_length= 0;
gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
break;
case MODE_SENSE:
TRACE2(("Mode sense hdrive %d\n",t));
memset((char*)&mpd,0,sizeof(gdth_modep_data));
mpd.hd.data_length = sizeof(gdth_modep_data);
mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 0x80:0;
mpd.hd.bd_length = sizeof(mpd.bd);
mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
break;
case READ_CAPACITY:
TRACE2(("Read capacity hdrive %d\n",t));
if (ha->hdr[t].size > (u64)0xffffffff)
rdc.last_block_no = 0xffffffff;
else
rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
rdc.block_length = cpu_to_be32(SECTOR_SIZE);
gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
break;
case SERVICE_ACTION_IN_16:
if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
(ha->cache_feat & GDT_64BIT)) {
gdth_rdcap16_data rdc16;
TRACE2(("Read capacity (16) hdrive %d\n",t));
rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
gdth_copy_internal_data(ha, scp, (char*)&rdc16,
sizeof(gdth_rdcap16_data));
} else {
scp->result = DID_ABORT << 16;
}
break;
default:
TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
break;
}
if (!cmndinfo->wait_for_completion)
cmndinfo->wait_for_completion++;
else
return 1;
return 0;
}
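/*
 * gdth_fill_cache_cmd() - translate a SCSI command for host drive
 * 'hdrive' into a cache service firmware command: pick the opcode,
 * decode block number and count from the CDB for reads/writes, map the
 * scatter-gather list for DMA and copy the command into DPMEM.
 * Returns the command index, or 0 if no index is free or the command
 * does not fit into DPMEM.
 */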
static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
u16 hdrive)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
u32 cnt, blockcnt;
u64 no, blockno;
int i, cmd_index, read_write, sgcnt, mode64;
cmdp = ha->pccb;
TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
scp->cmnd[0],scp->cmd_len,hdrive));
mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
/* test for READ_16, WRITE_16 if !mode64 ? ---
not required, should not occur due to error return on
READ_CAPACITY_16 */
cmdp->Service = CACHESERVICE;
cmdp->RequestBuffer = scp;
/* search free command index */
if (!(cmd_index=gdth_get_cmd_index(ha))) {
TRACE(("GDT: No free command index found\n"));
return 0;
}
/* if it's the first command, set command semaphore */
if (ha->cmd_cnt == 0)
gdth_set_sema0(ha);
/* fill command */
read_write = 0;
if (cmndinfo->OpCode != -1)
cmdp->OpCode = cmndinfo->OpCode; /* special cache cmd. */
else if (scp->cmnd[0] == RESERVE)
cmdp->OpCode = GDT_RESERVE_DRV;
else if (scp->cmnd[0] == RELEASE)
cmdp->OpCode = GDT_RELEASE_DRV;
else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
if (scp->cmnd[4] & 1) /* prevent ? */
cmdp->OpCode = GDT_MOUNT;
else if (scp->cmnd[3] & 1) /* removable drive ? */
cmdp->OpCode = GDT_UNMOUNT;
else
cmdp->OpCode = GDT_FLUSH;
} else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
) {
read_write = 1;
if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
(ha->cache_feat & GDT_WR_THROUGH)))
cmdp->OpCode = GDT_WRITE_THR;
else
cmdp->OpCode = GDT_WRITE;
} else {
read_write = 2;
cmdp->OpCode = GDT_READ;
}
cmdp->BoardNode = LOCALBOARD;
if (mode64) {
cmdp->u.cache64.DeviceNo = hdrive;
cmdp->u.cache64.BlockNo = 1;
cmdp->u.cache64.sg_canz = 0;
} else {
cmdp->u.cache.DeviceNo = hdrive;
cmdp->u.cache.BlockNo = 1;
cmdp->u.cache.sg_canz = 0;
}
if (read_write) {
if (scp->cmd_len == 16) {
memcpy(&no, &scp->cmnd[2], sizeof(u64));
blockno = be64_to_cpu(no);
memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
blockcnt = be32_to_cpu(cnt);
} else if (scp->cmd_len == 10) {
memcpy(&no, &scp->cmnd[2], sizeof(u32));
blockno = be32_to_cpu(no);
memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
blockcnt = be16_to_cpu(cnt);
} else {
memcpy(&no, &scp->cmnd[0], sizeof(u32));
blockno = be32_to_cpu(no) & 0x001fffffUL;
blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
}
if (mode64) {
cmdp->u.cache64.BlockNo = blockno;
cmdp->u.cache64.BlockCnt = blockcnt;
} else {
cmdp->u.cache.BlockNo = (u32)blockno;
cmdp->u.cache.BlockCnt = blockcnt;
}
if (scsi_bufflen(scp)) {
cmndinfo->dma_dir = (read_write == 1 ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
scsi_sg_count(scp), cmndinfo->dma_dir);
if (mode64) {
struct scatterlist *sl;
cmdp->u.cache64.DestAddr= (u64)-1;
cmdp->u.cache64.sg_canz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
}
} else {
struct scatterlist *sl;
cmdp->u.cache.DestAddr= 0xffffffff;
cmdp->u.cache.sg_canz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
}
}
#ifdef GDTH_STATISTICS
if (max_sg < (u32)sgcnt) {
max_sg = (u32)sgcnt;
TRACE3(("GDT: max_sg = %d\n",max_sg));
}
#endif
}
}
/* evaluate command size, check space */
if (mode64) {
TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
cmdp->u.cache64.sg_lst[0].sg_ptr,
cmdp->u.cache64.sg_lst[0].sg_len));
TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
(u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
} else {
TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
cmdp->u.cache.sg_lst[0].sg_ptr,
cmdp->u.cache.sg_lst[0].sg_len));
TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
(u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
}
if (ha->cmd_len & 3)
ha->cmd_len += (4 - (ha->cmd_len & 3));
if (ha->cmd_cnt > 0) {
if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
ha->ic_all_size) {
TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
return 0;
}
}
/* copy command */
gdth_copy_command(ha);
return cmd_index;
}
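/*
 * gdth_fill_raw_cmd() - build a raw service (SCSI pass-through) command
 * for a device on a physical bus: copy the CDB, map the sense buffer
 * and the scatter-gather list for DMA, then copy the command into
 * DPMEM.  Returns the command index, or 0 on failure.
 */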
static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
{
register gdth_cmd_str *cmdp;
u16 i;
dma_addr_t sense_paddr;
int cmd_index, sgcnt, mode64;
u8 t,l;
struct gdth_cmndinfo *cmndinfo;
t = scp->device->id;
l = scp->device->lun;
cmdp = ha->pccb;
TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
scp->cmnd[0],b,t,l));
mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
cmdp->Service = SCSIRAWSERVICE;
cmdp->RequestBuffer = scp;
/* search free command index */
if (!(cmd_index=gdth_get_cmd_index(ha))) {
TRACE(("GDT: No free command index found\n"));
return 0;
}
/* if it's the first command, set command semaphore */
if (ha->cmd_cnt == 0)
gdth_set_sema0(ha);
cmndinfo = gdth_cmnd_priv(scp);
/* fill command */
if (cmndinfo->OpCode != -1) {
cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. */
cmdp->BoardNode = LOCALBOARD;
if (mode64) {
cmdp->u.raw64.direction = (cmndinfo->phase >> 8);
TRACE2(("special raw cmd 0x%x param 0x%x\n",
cmdp->OpCode, cmdp->u.raw64.direction));
/* evaluate command size */
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
} else {
cmdp->u.raw.direction = (cmndinfo->phase >> 8);
TRACE2(("special raw cmd 0x%x param 0x%x\n",
cmdp->OpCode, cmdp->u.raw.direction));
/* evaluate command size */
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
}
} else {
sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
DMA_FROM_DEVICE);
cmndinfo->sense_paddr = sense_paddr;
cmdp->OpCode = GDT_WRITE; /* always */
cmdp->BoardNode = LOCALBOARD;
if (mode64) {
cmdp->u.raw64.reserved = 0;
cmdp->u.raw64.mdisc_time = 0;
cmdp->u.raw64.mcon_time = 0;
cmdp->u.raw64.clen = scp->cmd_len;
cmdp->u.raw64.target = t;
cmdp->u.raw64.lun = l;
cmdp->u.raw64.bus = b;
cmdp->u.raw64.priority = 0;
cmdp->u.raw64.sdlen = scsi_bufflen(scp);
cmdp->u.raw64.sense_len = 16;
cmdp->u.raw64.sense_data = sense_paddr;
cmdp->u.raw64.direction =
gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
cmdp->u.raw64.sg_ranz = 0;
} else {
cmdp->u.raw.reserved = 0;
cmdp->u.raw.mdisc_time = 0;
cmdp->u.raw.mcon_time = 0;
cmdp->u.raw.clen = scp->cmd_len;
cmdp->u.raw.target = t;
cmdp->u.raw.lun = l;
cmdp->u.raw.bus = b;
cmdp->u.raw.priority = 0;
cmdp->u.raw.link_p = 0;
cmdp->u.raw.sdlen = scsi_bufflen(scp);
cmdp->u.raw.sense_len = 16;
cmdp->u.raw.sense_data = sense_paddr;
cmdp->u.raw.direction =
gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
cmdp->u.raw.sg_ranz = 0;
}
if (scsi_bufflen(scp)) {
cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
scsi_sg_count(scp), cmndinfo->dma_dir);
if (mode64) {
struct scatterlist *sl;
cmdp->u.raw64.sdata = (u64)-1;
cmdp->u.raw64.sg_ranz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
}
} else {
struct scatterlist *sl;
cmdp->u.raw.sdata = 0xffffffff;
cmdp->u.raw.sg_ranz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
}
}
#ifdef GDTH_STATISTICS
if (max_sg < sgcnt) {
max_sg = sgcnt;
TRACE3(("GDT: max_sg = %d\n",sgcnt));
}
#endif
}
if (mode64) {
TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
cmdp->u.raw64.sg_lst[0].sg_ptr,
cmdp->u.raw64.sg_lst[0].sg_len));
/* evaluate command size */
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
(u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
} else {
TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
cmdp->u.raw.sg_lst[0].sg_ptr,
cmdp->u.raw.sg_lst[0].sg_len));
/* evaluate command size */
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
(u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
}
}
/* check space */
if (ha->cmd_len & 3)
ha->cmd_len += (4 - (ha->cmd_len & 3));
if (ha->cmd_cnt > 0) {
if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
ha->ic_all_size) {
TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
return 0;
}
}
/* copy command */
gdth_copy_command(ha);
return cmd_index;
}
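/*
 * gdth_special_cmd() - send a pre-built internal command
 * (cmndinfo->internal_cmd_str) to the firmware.  Returns the command
 * index, or 0 on failure.
 */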
static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
int cmd_index;
cmdp= ha->pccb;
TRACE2(("gdth_special_cmd(): "));
*cmdp = *cmndinfo->internal_cmd_str;
cmdp->RequestBuffer = scp;
/* search free command index */
if (!(cmd_index=gdth_get_cmd_index(ha))) {
TRACE(("GDT: No free command index found\n"));
return 0;
}
/* if it's the first command, set command semaphore */
if (ha->cmd_cnt == 0)
gdth_set_sema0(ha);
/* evaluate command size, check space */
if (cmdp->OpCode == GDT_IOCTL) {
TRACE2(("IOCTL\n"));
ha->cmd_len =
GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
} else if (cmdp->Service == CACHESERVICE) {
TRACE2(("cache command %d\n",cmdp->OpCode));
if (ha->cache_feat & GDT_64BIT)
ha->cmd_len =
GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
else
ha->cmd_len =
GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
} else if (cmdp->Service == SCSIRAWSERVICE) {
TRACE2(("raw command %d\n",cmdp->OpCode));
if (ha->raw_feat & GDT_64BIT)
ha->cmd_len =
GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
else
ha->cmd_len =
GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
}
if (ha->cmd_len & 3)
ha->cmd_len += (4 - (ha->cmd_len & 3));
if (ha->cmd_cnt > 0) {
if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
ha->ic_all_size) {
TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
return 0;
}
}
/* copy command */
gdth_copy_command(ha);
return cmd_index;
}
/* Controller event handling functions */
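/*
 * gdth_store_event() - record a controller/driver event in the global
 * ring buffer 'ebuffer'.  A repetition of the most recent event only
 * bumps its same_count and timestamp instead of using a new slot.
 */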
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
u16 idx, gdth_evt_data *evt)
{
gdth_evt_str *e;
/* no GDTH_LOCK_HA() ! */
TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
if (source == 0) /* no source -> no event */
return NULL;
if (ebuffer[elastidx].event_source == source &&
ebuffer[elastidx].event_idx == idx &&
((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
!memcmp((char *)&ebuffer[elastidx].event_data.eu,
(char *)&evt->eu, evt->size)) ||
(evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
!strcmp((char *)&ebuffer[elastidx].event_data.event_string,
(char *)&evt->event_string)))) {
e = &ebuffer[elastidx];
e->last_stamp = (u32)ktime_get_real_seconds();
++e->same_count;
} else {
if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
++elastidx;
if (elastidx == MAX_EVENTS)
elastidx = 0;
if (elastidx == eoldidx) { /* reached mark ? */
++eoldidx;
if (eoldidx == MAX_EVENTS)
eoldidx = 0;
}
}
e = &ebuffer[elastidx];
e->event_source = source;
e->event_idx = idx;
e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
e->same_count = 1;
e->event_data = *evt;
e->application = 0;
}
return e;
}
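/*
 * gdth_read_event() - copy the event addressed by 'handle' (-1 means
 * the oldest one) into *estr and return the handle of the following
 * event, or -1 when the end of the buffer has been reached.
 */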
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
{
gdth_evt_str *e;
int eindex;
unsigned long flags;
TRACE2(("gdth_read_event() handle %d\n", handle));
spin_lock_irqsave(&ha->smp_lock, flags);
if (handle == -1)
eindex = eoldidx;
else
eindex = handle;
estr->event_source = 0;
if (eindex < 0 || eindex >= MAX_EVENTS) {
spin_unlock_irqrestore(&ha->smp_lock, flags);
return eindex;
}
e = &ebuffer[eindex];
if (e->event_source != 0) {
if (eindex != elastidx) {
if (++eindex == MAX_EVENTS)
eindex = 0;
} else {
eindex = -1;
}
memcpy(estr, e, sizeof(gdth_evt_str));
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
return eindex;
}
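/*
 * gdth_readapp_event() - return the oldest event not yet delivered to
 * the given application and mark it as delivered by setting the
 * corresponding bit in the per-event application mask.
 */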
static void gdth_readapp_event(gdth_ha_str *ha,
u8 application, gdth_evt_str *estr)
{
gdth_evt_str *e;
int eindex;
unsigned long flags;
u8 found = FALSE;
TRACE2(("gdth_readapp_event() app. %d\n", application));
spin_lock_irqsave(&ha->smp_lock, flags);
eindex = eoldidx;
for (;;) {
e = &ebuffer[eindex];
if (e->event_source == 0)
break;
if ((e->application & application) == 0) {
e->application |= application;
found = TRUE;
break;
}
if (eindex == elastidx)
break;
if (++eindex == MAX_EVENTS)
eindex = 0;
}
if (found)
memcpy(estr, e, sizeof(gdth_evt_str));
else
estr->event_source = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
static void gdth_clear_events(void)
{
TRACE(("gdth_clear_events()"));
eoldidx = elastidx = 0;
ebuffer[0].event_source = 0;
}
/* SCSI interface functions */
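/*
 * __gdth_interrupt() - interrupt service routine: read the status and
 * index registers of the board, acknowledge the interrupt, handle
 * asynchronous events and complete the command identified by the
 * returned index.  When called from gdth_wait() the completed index is
 * also passed back via *pIndex.
 */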
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex)
{
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
gdt6_dpram_str __iomem *dp6_ptr;
struct scsi_cmnd *scp;
int rval, i;
u8 IStatus;
u16 Service;
unsigned long flags = 0;
TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
/* if polling and not from gdth_wait() -> return */
if (gdth_polling) {
if (!gdth_from_wait) {
return IRQ_HANDLED;
}
}
if (!gdth_polling)
spin_lock_irqsave(&ha->smp_lock, flags);
/* read the interrupt status of this controller */
IStatus = gdth_get_status(ha);
if (IStatus == 0) {
/* spurious interrupt */
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
return IRQ_HANDLED;
}
#ifdef GDTH_STATISTICS
++act_ints;
#endif
if (ha->type == GDT_PCI) {
dp6_ptr = ha->brd;
if (IStatus & 0x80) { /* error flag */
IStatus &= ~0x80;
ha->status = readw(&dp6_ptr->u.ic.Status);
TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
} else /* no error */
ha->status = S_OK;
ha->info = readl(&dp6_ptr->u.ic.Info[0]);
ha->service = readw(&dp6_ptr->u.ic.Service);
ha->info2 = readl(&dp6_ptr->u.ic.Info[1]);
writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */
} else if (ha->type == GDT_PCINEW) {
if (IStatus & 0x80) { /* error flag */
IStatus &= ~0x80;
ha->status = inw(PTR2USHORT(&ha->plx->status));
TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
} else
ha->status = S_OK;
ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
ha->service = inw(PTR2USHORT(&ha->plx->service));
ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));
outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
} else if (ha->type == GDT_PCIMPR) {
dp6m_ptr = ha->brd;
if (IStatus & 0x80) { /* error flag */
IStatus &= ~0x80;
ha->status = readw(&dp6m_ptr->i960r.status);
TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
} else /* no error */
ha->status = S_OK;
ha->info = readl(&dp6m_ptr->i960r.info[0]);
ha->service = readw(&dp6m_ptr->i960r.service);
ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
/* event string */
if (IStatus == ASYNCINDEX) {
if (ha->service != SCREENSERVICE &&
(ha->fw_vers & 0xff) >= 0x1a) {
ha->dvr.severity = readb
(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
for (i = 0; i < 256; ++i) {
ha->dvr.event_string[i] = readb
(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
if (ha->dvr.event_string[i] == 0)
break;
}
}
}
writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
writeb(0, &dp6m_ptr->i960r.sema1_reg);
} else {
TRACE2(("gdth_interrupt() unknown controller type\n"));
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
return IRQ_HANDLED;
}
TRACE(("gdth_interrupt() index %d stat %d info %d\n",
IStatus,ha->status,ha->info));
if (gdth_from_wait) {
*pIndex = (int)IStatus;
}
if (IStatus == ASYNCINDEX) {
TRACE2(("gdth_interrupt() async. event\n"));
gdth_async_event(ha);
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_next(ha);
return IRQ_HANDLED;
}
if (IStatus == SPEZINDEX) {
TRACE2(("Service unknown or not initialized !\n"));
ha->dvr.size = sizeof(ha->dvr.eu.driver);
ha->dvr.eu.driver.ionode = ha->hanum;
gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
return IRQ_HANDLED;
}
scp = ha->cmd_tab[IStatus-2].cmnd;
Service = ha->cmd_tab[IStatus-2].service;
ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
if (scp == UNUSED_CMND) {
TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
ha->dvr.size = sizeof(ha->dvr.eu.driver);
ha->dvr.eu.driver.ionode = ha->hanum;
ha->dvr.eu.driver.index = IStatus;
gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
return IRQ_HANDLED;
}
if (scp == INTERNAL_CMND) {
TRACE(("gdth_interrupt() answer to internal command\n"));
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
return IRQ_HANDLED;
}
TRACE(("gdth_interrupt() sync. status\n"));
rval = gdth_sync_event(ha,Service,IStatus,scp);
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
if (rval == 2) {
gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority);
} else if (rval == 1) {
gdth_scsi_done(scp);
}
gdth_next(ha);
return IRQ_HANDLED;
}
static irqreturn_t gdth_interrupt(int irq, void *dev_id)
{
gdth_ha_str *ha = dev_id;
return __gdth_interrupt(ha, false, NULL);
}
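/*
 * gdth_sync_event() - completion handling for a synchronous command:
 * process screen service messages, unmap DMA buffers and evaluate the
 * firmware status for cache/raw commands.  Return values:
 * 0 = handled internally, 1 = call gdth_scsi_done(), 2 = requeue for
 * retry.
 */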
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
struct scsi_cmnd *scp)
{
gdth_msg_str *msg;
gdth_cmd_str *cmdp;
u8 b, t;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
cmdp = ha->pccb;
TRACE(("gdth_sync_event() serv %d status %d\n",
service,ha->status));
if (service == SCREENSERVICE) {
msg = ha->pmsg;
TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
if (msg->msg_len > MSGLEN+1)
msg->msg_len = MSGLEN+1;
if (msg->msg_len)
if (!(msg->msg_answer && msg->msg_ext)) {
msg->msg_text[msg->msg_len] = '\0';
printk("%s",msg->msg_text);
}
if (msg->msg_ext && !msg->msg_answer) {
while (gdth_test_busy(ha))
gdth_delay(0);
cmdp->Service = SCREENSERVICE;
cmdp->RequestBuffer = SCREEN_CMND;
gdth_get_cmd_index(ha);
gdth_set_sema0(ha);
cmdp->OpCode = GDT_READ;
cmdp->BoardNode = LOCALBOARD;
cmdp->u.screen.reserved = 0;
cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
ha->cmd_offs_dpmem = 0;
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
+ sizeof(u64);
ha->cmd_cnt = 0;
gdth_copy_command(ha);
gdth_release_event(ha);
return 0;
}
if (msg->msg_answer && msg->msg_alen) {
/* default answers (getchar() not possible) */
if (msg->msg_alen == 1) {
msg->msg_alen = 0;
msg->msg_len = 1;
msg->msg_text[0] = 0;
} else {
msg->msg_alen -= 2;
msg->msg_len = 2;
msg->msg_text[0] = 1;
msg->msg_text[1] = 0;
}
msg->msg_ext = 0;
msg->msg_answer = 0;
while (gdth_test_busy(ha))
gdth_delay(0);
cmdp->Service = SCREENSERVICE;
cmdp->RequestBuffer = SCREEN_CMND;
gdth_get_cmd_index(ha);
gdth_set_sema0(ha);
cmdp->OpCode = GDT_WRITE;
cmdp->BoardNode = LOCALBOARD;
cmdp->u.screen.reserved = 0;
cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
ha->cmd_offs_dpmem = 0;
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
+ sizeof(u64);
ha->cmd_cnt = 0;
gdth_copy_command(ha);
gdth_release_event(ha);
return 0;
}
printk("\n");
} else {
b = scp->device->channel;
t = scp->device->id;
if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
}
/* cache or raw service */
if (ha->status == S_BSY) {
TRACE2(("Controller busy -> retry !\n"));
if (cmndinfo->OpCode == GDT_MOUNT)
cmndinfo->OpCode = GDT_CLUST_INFO;
/* retry */
return 2;
}
if (scsi_bufflen(scp))
dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
cmndinfo->dma_dir);
if (cmndinfo->sense_paddr)
dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
DMA_FROM_DEVICE);
if (ha->status == S_OK) {
cmndinfo->status = S_OK;
cmndinfo->info = ha->info;
if (cmndinfo->OpCode != -1) {
TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
cmndinfo->OpCode));
/* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
if (cmndinfo->OpCode == GDT_CLUST_INFO) {
ha->hdr[t].cluster_type = (u8)ha->info;
if (!(ha->hdr[t].cluster_type &
CLUSTER_MOUNTED)) {
/* NOT MOUNTED -> MOUNT */
cmndinfo->OpCode = GDT_MOUNT;
if (ha->hdr[t].cluster_type &
CLUSTER_RESERVED) {
/* cluster drive RESERVED (on the other node) */
cmndinfo->phase = -2; /* reservation conflict */
}
} else {
cmndinfo->OpCode = -1;
}
} else {
if (cmndinfo->OpCode == GDT_MOUNT) {
ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
ha->hdr[t].media_changed = TRUE;
} else if (cmndinfo->OpCode == GDT_UNMOUNT) {
ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
ha->hdr[t].media_changed = TRUE;
}
cmndinfo->OpCode = -1;
}
/* retry */
cmndinfo->priority = HIGH_PRI;
return 2;
} else {
/* RESERVE/RELEASE ? */
if (scp->cmnd[0] == RESERVE) {
ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
} else if (scp->cmnd[0] == RELEASE) {
ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
}
scp->result = DID_OK << 16;
scp->sense_buffer[0] = 0;
}
} else {
cmndinfo->status = ha->status;
cmndinfo->info = ha->info;
if (cmndinfo->OpCode != -1) {
TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
cmndinfo->OpCode, ha->status));
if (cmndinfo->OpCode == GDT_SCAN_START ||
cmndinfo->OpCode == GDT_SCAN_END) {
cmndinfo->OpCode = -1;
/* retry */
cmndinfo->priority = HIGH_PRI;
return 2;
}
memset((char*)scp->sense_buffer,0,16);
scp->sense_buffer[0] = 0x70;
scp->sense_buffer[2] = NOT_READY;
scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
} else if (service == CACHESERVICE) {
if (ha->status == S_CACHE_UNKNOWN &&
(ha->hdr[t].cluster_type &
CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
/* bus reset -> force GDT_CLUST_INFO */
ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
}
memset((char*)scp->sense_buffer,0,16);
if (ha->status == (u16)S_CACHE_RESERV) {
scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
} else {
scp->sense_buffer[0] = 0x70;
scp->sense_buffer[2] = NOT_READY;
scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
}
if (!cmndinfo->internal_command) {
ha->dvr.size = sizeof(ha->dvr.eu.sync);
ha->dvr.eu.sync.ionode = ha->hanum;
ha->dvr.eu.sync.service = service;
ha->dvr.eu.sync.status = ha->status;
ha->dvr.eu.sync.info = ha->info;
ha->dvr.eu.sync.hostdrive = t;
if (ha->status >= 0x8000)
gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
else
gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
}
} else {
/* sense buffer filled from controller firmware (DMA) */
if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
scp->result = DID_BAD_TARGET << 16;
} else {
scp->result = (DID_OK << 16) | ha->info;
}
}
}
if (!cmndinfo->wait_for_completion)
cmndinfo->wait_for_completion++;
else
return 1;
}
return 0;
}
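/*
 * Message templates for asynchronous cache service events.  Each entry
 * starts with a small binary header (octal escapes): byte 0 is the
 * offset of the printf format string within the entry, followed by
 * (offset, size) pairs selecting the event data bytes that are pushed
 * as arguments in gdth_log_event().
 */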
static char *async_cache_tab[] = {
/* 0*/ "\011\000\002\002\002\004\002\006\004"
"GDT HA %u, service %u, async. status %u/%lu unknown",
/* 1*/ "\011\000\002\002\002\004\002\006\004"
"GDT HA %u, service %u, async. status %u/%lu unknown",
/* 2*/ "\005\000\002\006\004"
"GDT HA %u, Host Drive %lu not ready",
/* 3*/ "\005\000\002\006\004"
"GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
/* 4*/ "\005\000\002\006\004"
"GDT HA %u, mirror update on Host Drive %lu failed",
/* 5*/ "\005\000\002\006\004"
"GDT HA %u, Mirror Drive %lu failed",
/* 6*/ "\005\000\002\006\004"
"GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
/* 7*/ "\005\000\002\006\004"
"GDT HA %u, Host Drive %lu write protected",
/* 8*/ "\005\000\002\006\004"
"GDT HA %u, media changed in Host Drive %lu",
/* 9*/ "\005\000\002\006\004"
"GDT HA %u, Host Drive %lu is offline",
/*10*/ "\005\000\002\006\004"
"GDT HA %u, media change of Mirror Drive %lu",
/*11*/ "\005\000\002\006\004"
"GDT HA %u, Mirror Drive %lu is write protected",
/*12*/ "\005\000\002\006\004"
"GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
/*13*/ "\007\000\002\006\002\010\002"
"GDT HA %u, Array Drive %u: Cache Drive %u failed",
/*14*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: FAIL state entered",
/*15*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: error",
/*16*/ "\007\000\002\006\002\010\002"
"GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
/*17*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: parity build failed",
/*18*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: drive rebuild failed",
/*19*/ "\005\000\002\010\002"
"GDT HA %u, Test of Hot Fix %u failed",
/*20*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: drive build finished successfully",
/*21*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: drive rebuild finished successfully",
/*22*/ "\007\000\002\006\002\010\002"
"GDT HA %u, Array Drive %u: Hot Fix %u activated",
/*23*/ "\005\000\002\006\002"
"GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
/*24*/ "\005\000\002\010\002"
"GDT HA %u, mirror update on Cache Drive %u completed",
/*25*/ "\005\000\002\010\002"
"GDT HA %u, mirror update on Cache Drive %lu failed",
/*26*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: drive rebuild started",
/*27*/ "\005\000\002\012\001"
"GDT HA %u, Fault bus %u: SHELF OK detected",
/*28*/ "\005\000\002\012\001"
"GDT HA %u, Fault bus %u: SHELF not OK detected",
/*29*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
/*30*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: new disk detected",
/*31*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: old disk detected",
/*32*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
/*33*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: invalid device detected",
/*34*/ "\011\000\002\012\001\013\001\006\004"
"GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
/*35*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: disk write protected",
/*36*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: disk not available",
/*37*/ "\007\000\002\012\001\006\004"
"GDT HA %u, Fault bus %u: swap detected (%lu)",
/*38*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
/*39*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
/*40*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
/*41*/ "\007\000\002\012\001\013\001"
"GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
/*42*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: drive build started",
/*43*/ "\003\000\002"
"GDT HA %u, DRAM parity error detected",
/*44*/ "\005\000\002\006\002"
"GDT HA %u, Mirror Drive %u: update started",
/*45*/ "\007\000\002\006\002\010\002"
"GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
/*46*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
/*47*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
/*48*/ "\005\000\002\006\002"
"GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
/*49*/ "\005\000\002\006\002"
"GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
/*50*/ "\007\000\002\012\001\013\001"
"GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
/*51*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: expand started",
/*52*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: expand finished successfully",
/*53*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: expand failed",
/*54*/ "\003\000\002"
"GDT HA %u, CPU temperature critical",
/*55*/ "\003\000\002"
"GDT HA %u, CPU temperature OK",
/*56*/ "\005\000\002\006\004"
"GDT HA %u, Host drive %lu created",
/*57*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: expand restarted",
/*58*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: expand stopped",
/*59*/ "\005\000\002\010\002"
"GDT HA %u, Mirror Drive %u: drive build quited",
/*60*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: parity build quited",
/*61*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: drive rebuild quited",
/*62*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: parity verify started",
/*63*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: parity verify done",
/*64*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: parity verify failed",
/*65*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: parity error detected",
/*66*/ "\005\000\002\006\002"
"GDT HA %u, Array Drive %u: parity verify quited",
/*67*/ "\005\000\002\006\002"
"GDT HA %u, Host Drive %u reserved",
/*68*/ "\005\000\002\006\002"
"GDT HA %u, Host Drive %u mounted and released",
/*69*/ "\005\000\002\006\002"
"GDT HA %u, Host Drive %u released",
/*70*/ "\003\000\002"
"GDT HA %u, DRAM error detected and corrected with ECC",
/*71*/ "\003\000\002"
"GDT HA %u, Uncorrectable DRAM error detected with ECC",
/*72*/ "\011\000\002\012\001\013\001\014\001"
"GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
/*73*/ "\005\000\002\006\002"
"GDT HA %u, Host drive %u reset locally",
/*74*/ "\005\000\002\006\002"
"GDT HA %u, Host drive %u reset remotely",
/*75*/ "\003\000\002"
"GDT HA %u, async. status 75 unknown",
};
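/*
 * gdth_async_event() - handle an asynchronous controller event: for the
 * screen service a pending message is fetched with a GDT_READ command,
 * for all other services the event is stored in the event buffer and
 * logged.
 */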
static int gdth_async_event(gdth_ha_str *ha)
{
gdth_cmd_str *cmdp;
int cmd_index;
cmdp= ha->pccb;
TRACE2(("gdth_async_event() ha %d serv %d\n",
ha->hanum, ha->service));
if (ha->service == SCREENSERVICE) {
if (ha->status == MSG_REQUEST) {
while (gdth_test_busy(ha))
gdth_delay(0);
cmdp->Service = SCREENSERVICE;
cmdp->RequestBuffer = SCREEN_CMND;
cmd_index = gdth_get_cmd_index(ha);
gdth_set_sema0(ha);
cmdp->OpCode = GDT_READ;
cmdp->BoardNode = LOCALBOARD;
cmdp->u.screen.reserved = 0;
cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
ha->cmd_offs_dpmem = 0;
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
+ sizeof(u64);
ha->cmd_cnt = 0;
gdth_copy_command(ha);
printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
(u16)((ha->brd_phys>>3)&0x1f));
gdth_release_event(ha);
}
} else {
if (ha->type == GDT_PCIMPR &&
(ha->fw_vers & 0xff) >= 0x1a) {
ha->dvr.size = 0;
ha->dvr.eu.async.ionode = ha->hanum;
ha->dvr.eu.async.status = ha->status;
/* severity and event_string already set! */
} else {
ha->dvr.size = sizeof(ha->dvr.eu.async);
ha->dvr.eu.async.ionode = ha->hanum;
ha->dvr.eu.async.service = ha->service;
ha->dvr.eu.async.status = ha->status;
ha->dvr.eu.async.info = ha->info;
*(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
}
gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
gdth_log_event( &ha->dvr, NULL );
/* new host drive from expand? */
if (ha->service == CACHESERVICE && ha->status == 56) {
TRACE2(("gdth_async_event(): new host drive %d created\n",
(u16)ha->info));
/* gdth_analyse_hdrive(hanum, (u16)ha->info); */
}
}
return 1;
}
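/*
 * gdth_log_event() - format and print (or write into 'buffer') an
 * asynchronous event, using the templates in async_cache_tab for cache
 * service events.
 */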
static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
{
gdth_stackframe stack;
char *f = NULL;
int i,j;
TRACE2(("gdth_log_event()\n"));
if (dvr->size == 0) {
if (buffer == NULL) {
printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
} else {
sprintf(buffer,"Adapter %d: %s\n",
dvr->eu.async.ionode,dvr->event_string);
}
} else if (dvr->eu.async.service == CACHESERVICE &&
INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
TRACE2(("GDT: Async. event cache service, event no.: %d\n",
dvr->eu.async.status));
f = async_cache_tab[dvr->eu.async.status];
/* i: parameter to push, j: stack element to fill */
for (j=0,i=1; i < f[0]; i+=2) {
switch (f[i+1]) {
case 4:
stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
break;
case 2:
stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
break;
case 1:
stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
break;
default:
break;
}
}
if (buffer == NULL) {
printk(&f[(int)f[0]],stack);
printk("\n");
} else {
sprintf(buffer,&f[(int)f[0]],stack);
}
} else {
if (buffer == NULL) {
printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
} else {
sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
}
}
}
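/*
* Statistics timer (GDTH_STATISTICS only): roughly every 30 seconds the
* timer counts the commands active in ha->cmd_tab[] and the requests
* queued on ha->req_first of the first registered adapter, traces the
* interrupt/IO counters and resets them.  Without GDTH_STATISTICS,
* gdth_timer_init() is a no-op.
*/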
#ifdef GDTH_STATISTICS
static u8 gdth_timer_running;
static void gdth_timeout(struct timer_list *unused)
{
u32 i;
struct scsi_cmnd *nscp;
gdth_ha_str *ha;
unsigned long flags;
if(unlikely(list_empty(&gdth_instances))) {
gdth_timer_running = 0;
return;
}
ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
spin_lock_irqsave(&ha->smp_lock, flags);
for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
++act_stats;
for (act_rq=0,
nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++act_rq;
TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
act_ints, act_ios, act_stats, act_rq));
act_ints = act_ios = 0;
gdth_timer.expires = jiffies + 30 * HZ;
add_timer(&gdth_timer);
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
static void gdth_timer_init(void)
{
if (gdth_timer_running)
return;
gdth_timer_running = 1;
TRACE2(("gdth_detect(): Initializing timer !\n"));
gdth_timer.expires = jiffies + HZ;
add_timer(&gdth_timer);
}
#else
static inline void gdth_timer_init(void)
{
}
#endif
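/*
* Parse the "option:value" part of the driver parameter string.  Options
* are separated by commas, values are either numeric or y/Y/n/N booleans,
* and reserve_list: is followed by a comma-separated list of numbers.  An
* illustrative (not exhaustive) string would be
* "disable:N,reserve_mode:1,hdr_channel:2,max_ids:127".
*/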
static void __init internal_setup(char *str,int *ints)
{
int i;
char *cur_str, *argv;
TRACE2(("internal_setup() str %s ints[0] %d\n",
str ? str:"NULL", ints ? ints[0]:0));
/* analyse string */
argv = str;
while (argv && (cur_str = strchr(argv, ':'))) {
int val = 0, c = *++cur_str;
if (c == 'n' || c == 'N')
val = 0;
else if (c == 'y' || c == 'Y')
val = 1;
else
val = (int)simple_strtoul(cur_str, NULL, 0);
if (!strncmp(argv, "disable:", 8))
disable = val;
else if (!strncmp(argv, "reserve_mode:", 13))
reserve_mode = val;
else if (!strncmp(argv, "reverse_scan:", 13))
reverse_scan = val;
else if (!strncmp(argv, "hdr_channel:", 12))
hdr_channel = val;
else if (!strncmp(argv, "max_ids:", 8))
max_ids = val;
else if (!strncmp(argv, "rescan:", 7))
rescan = val;
else if (!strncmp(argv, "shared_access:", 14))
shared_access = val;
else if (!strncmp(argv, "reserve_list:", 13)) {
reserve_list[0] = val;
for (i = 1; i < MAX_RES_ARGS; i++) {
cur_str = strchr(cur_str, ',');
if (!cur_str)
break;
if (!isdigit((int)*++cur_str)) {
--cur_str;
break;
}
reserve_list[i] =
(int)simple_strtoul(cur_str, NULL, 0);
}
if (!cur_str)
break;
argv = ++cur_str;
continue;
}
if ((argv = strchr(argv, ',')))
++argv;
}
}
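/*
* __setup() handler for the "gdth=" boot option: leading comma-separated
* numbers are collected into ints[] (ints[0] receives their count) and
* the rest of the string is handed to internal_setup() for the
* option:value pairs above.
*/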
int __init option_setup(char *str)
{
int ints[MAXHA];
char *cur = str;
int i = 1;
TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
while (cur && isdigit(*cur) && i < MAXHA) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL) cur++;
}
ints[0] = i - 1;
internal_setup(cur, ints);
return 1;
}
static const char *gdth_ctr_name(gdth_ha_str *ha)
{
TRACE2(("gdth_ctr_name()\n"));
if (ha->type == GDT_PCI) {
switch (ha->pdev->device) {
case PCI_DEVICE_ID_VORTEX_GDT60x0:
return("GDT6000/6020/6050");
case PCI_DEVICE_ID_VORTEX_GDT6000B:
return("GDT6000B/6010");
}
}
/* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
return("");
}
static const char *gdth_info(struct Scsi_Host *shp)
{
gdth_ha_str *ha = shost_priv(shp);
TRACE2(("gdth_info()\n"));
return ((const char *)ha->binfo.type_string);
}
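/*
* SCSI midlayer timeout hook: a command may time out up to six times
* before the error handler takes over, and the timer is also restarted
* while the affected raw bus or host drive is locked by an ioctl
* (locked I/O).
*/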
static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
u8 b, t;
unsigned long flags;
enum blk_eh_timer_return retval = BLK_EH_DONE;
TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
b = scp->device->channel;
t = scp->device->id;
/*
* We don't really honor the command timeout, but we try to
* honor 6 times of the actual command timeout! So reset the
* timer if this is less than 6th timeout on this command!
*/
if (++cmndinfo->timeout_count < 6)
retval = BLK_EH_RESET_TIMER;
/* Reset the timeout if it is locked IO */
spin_lock_irqsave(&ha->smp_lock, flags);
if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
(b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
TRACE2(("%s(): locked IO, reset timeout\n", __func__));
retval = BLK_EH_RESET_TIMER;
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
return retval;
}
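/*
* Bus reset error handler: forget all commands outstanding on the
* affected channel, then either clear the cluster reservations of all
* host drives (virtual bus) or reset the raw SCSI bus through the
* firmware, polling until the controller is idle.
*/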
static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
int i;
unsigned long flags;
struct scsi_cmnd *cmnd;
u8 b;
TRACE2(("gdth_eh_bus_reset()\n"));
b = scp->device->channel;
/* clear command tab */
spin_lock_irqsave(&ha->smp_lock, flags);
for (i = 0; i < GDTH_MAXCMDS; ++i) {
cmnd = ha->cmd_tab[i].cmnd;
if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
ha->cmd_tab[i].cmnd = UNUSED_CMND;
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
if (b == ha->virt_bus) {
/* host drives */
for (i = 0; i < MAX_HDRIVES; ++i) {
if (ha->hdr[i].present) {
spin_lock_irqsave(&ha->smp_lock, flags);
gdth_polling = TRUE;
while (gdth_test_busy(ha))
gdth_delay(0);
if (gdth_internal_cmd(ha, CACHESERVICE,
GDT_CLUST_RESET, i, 0, 0))
ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
gdth_polling = FALSE;
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
}
} else {
/* raw devices */
spin_lock_irqsave(&ha->smp_lock, flags);
for (i = 0; i < MAXID; ++i)
ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
gdth_polling = TRUE;
while (gdth_test_busy(ha))
gdth_delay(0);
gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS,
BUS_L2P(ha,b), 0, 0);
gdth_polling = FALSE;
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
return SUCCESS;
}
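/*
* BIOS geometry: host drives report the head and sector counts stored in
* ha->hdr[] and derive the cylinder count from the capacity (e.g. a drive
* mapped with 255 heads and 63 sectors per track reports
* capacity / (255 * 63) cylinders); raw devices and host drives without
* mapping data get a geometry computed by gdth_eval_mapping().
*/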
static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
{
u8 b, t;
gdth_ha_str *ha = shost_priv(sdev->host);
struct scsi_device *sd;
unsigned capacity;
sd = sdev;
capacity = cap;
b = sd->channel;
t = sd->id;
TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));
if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
/* raw device or host drive without mapping information */
TRACE2(("Evaluate mapping\n"));
gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
} else {
ip[0] = ha->hdr[t].heads;
ip[1] = ha->hdr[t].secs;
ip[2] = capacity / ip[0] / ip[1];
}
TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
ip[0],ip[1],ip[2]));
return 0;
}
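/*
* queuecommand entry points: gdth_queuecommand() is generated by
* DEF_SCSI_QCMD() below and calls the _lck variant with the host lock
* held; __gdth_queuecommand() attaches the per-command info, queues the
* command by priority (gdth_putq) and kicks request processing with
* gdth_next().
*/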
static int gdth_queuecommand_lck(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
{
gdth_ha_str *ha = shost_priv(scp->device->host);
struct gdth_cmndinfo *cmndinfo;
TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));
cmndinfo = gdth_get_cmndinfo(ha);
BUG_ON(!cmndinfo);
scp->scsi_done = done;
cmndinfo->timeout_count = 0;
cmndinfo->priority = DEFAULT_PRI;
return __gdth_queuecommand(ha, scp, cmndinfo);
}
static DEF_SCSI_QCMD(gdth_queuecommand)
static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
struct gdth_cmndinfo *cmndinfo)
{
scp->host_scribble = (unsigned char *)cmndinfo;
cmndinfo->wait_for_completion = 1;
cmndinfo->phase = -1;
cmndinfo->OpCode = -1;
#ifdef GDTH_STATISTICS
++act_ios;
#endif
gdth_putq(ha, scp, cmndinfo->priority);
gdth_next(ha);
return 0;
}
static int gdth_open(struct inode *inode, struct file *filep)
{
gdth_ha_str *ha;
mutex_lock(&gdth_mutex);
list_for_each_entry(ha, &gdth_instances, list) {
if (!ha->sdev)
ha->sdev = scsi_get_host_dev(ha->shost);
}
mutex_unlock(&gdth_mutex);
TRACE(("gdth_open()\n"));
return 0;
}
static int gdth_close(struct inode *inode, struct file *filep)
{
TRACE(("gdth_close()\n"));
return 0;
}
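/*
* GDTIOCTL_EVENT handler: depending on evt.erase the event passed in is
* stored (0xff), the event buffer is cleared (0xfe), the event following
* evt.handle is read (0), or an application event of the given source is
* read (any other value).
*/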
static int ioc_event(void __user *arg)
{
gdth_ioctl_event evt;
gdth_ha_str *ha;
unsigned long flags;
if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
return -EFAULT;
ha = gdth_find_ha(evt.ionode);
if (!ha)
return -EFAULT;
if (evt.erase == 0xff) {
if (evt.event.event_source == ES_TEST)
evt.event.event_data.size=sizeof(evt.event.event_data.eu.test);
else if (evt.event.event_source == ES_DRIVER)
evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver);
else if (evt.event.event_source == ES_SYNC)
evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync);
else
evt.event.event_data.size=sizeof(evt.event.event_data.eu.async);
spin_lock_irqsave(&ha->smp_lock, flags);
gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
&evt.event.event_data);
spin_unlock_irqrestore(&ha->smp_lock, flags);
} else if (evt.erase == 0xfe) {
gdth_clear_events();
} else if (evt.erase == 0) {
evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
} else {
gdth_readapp_event(ha, evt.erase, &evt.event);
}
if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
return -EFAULT;
return 0;
}
static int ioc_lockdrv(void __user *arg)
{
gdth_ioctl_lockdrv ldrv;
u8 i, j;
unsigned long flags;
gdth_ha_str *ha;
if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
return -EFAULT;
ha = gdth_find_ha(ldrv.ionode);
if (!ha)
return -EFAULT;
for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
j = ldrv.drives[i];
if (j >= MAX_HDRIVES || !ha->hdr[j].present)
continue;
if (ldrv.lock) {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[j].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_wait_completion(ha, ha->bus_cnt, j);
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[j].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_next(ha);
}
}
return 0;
}
static int ioc_resetdrv(void __user *arg, char *cmnd)
{
gdth_ioctl_reset res;
gdth_cmd_str cmd;
gdth_ha_str *ha;
int rval;
if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
res.number >= MAX_HDRIVES)
return -EFAULT;
ha = gdth_find_ha(res.ionode);
if (!ha)
return -EFAULT;
if (!ha->hdr[res.number].present)
return 0;
memset(&cmd, 0, sizeof(gdth_cmd_str));
cmd.Service = CACHESERVICE;
cmd.OpCode = GDT_CLUST_RESET;
if (ha->cache_feat & GDT_64BIT)
cmd.u.cache64.DeviceNo = res.number;
else
cmd.u.cache.DeviceNo = res.number;
rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
if (rval < 0)
return rval;
res.status = rval;
if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
return -EFAULT;
return 0;
}
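/*
* Prepare a cache service ioctl command for execution: on 64-bit capable
* firmware the relevant fields are copied from the 32-bit layout into the
* cache64 variant, and the data buffer at 'paddr' is attached either as a
* single scatter/gather element or as a direct destination address,
* depending on the controller's feature bits.
*/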
static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen,
u64 paddr)
{
if (ha->cache_feat & GDT_64BIT) {
/* copy elements from 32-bit IOCTL structure */
gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;
if (ha->cache_feat & SCATTER_GATHER) {
gen->command.u.cache64.DestAddr = (u64)-1;
gen->command.u.cache64.sg_canz = 1;
gen->command.u.cache64.sg_lst[0].sg_ptr = paddr;
gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len;
gen->command.u.cache64.sg_lst[1].sg_len = 0;
} else {
gen->command.u.cache64.DestAddr = paddr;
gen->command.u.cache64.sg_canz = 0;
}
} else {
if (ha->cache_feat & SCATTER_GATHER) {
gen->command.u.cache.DestAddr = 0xffffffff;
gen->command.u.cache.sg_canz = 1;
gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
gen->command.u.cache.sg_lst[1].sg_len = 0;
} else {
gen->command.u.cache.DestAddr = paddr;
gen->command.u.cache.sg_canz = 0;
}
}
}
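/*
* Same conversion for raw SCSI ioctl commands: the 32-bit raw fields are
* copied into the raw64 layout where supported, the data buffer is
* attached via scatter/gather or directly, and sense_data points at the
* area following the data portion of the DMA buffer.
*/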
static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen,
u64 paddr)
{
if (ha->raw_feat & GDT_64BIT) {
/* copy elements from 32-bit IOCTL structure */
char cmd[16];
gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len;
gen->command.u.raw64.bus = gen->command.u.raw.bus;
gen->command.u.raw64.lun = gen->command.u.raw.lun;
gen->command.u.raw64.target = gen->command.u.raw.target;
memcpy(cmd, gen->command.u.raw.cmd, 16);
memcpy(gen->command.u.raw64.cmd, cmd, 16);
gen->command.u.raw64.clen = gen->command.u.raw.clen;
gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen;
gen->command.u.raw64.direction = gen->command.u.raw.direction;
/* addresses */
if (ha->raw_feat & SCATTER_GATHER) {
gen->command.u.raw64.sdata = (u64)-1;
gen->command.u.raw64.sg_ranz = 1;
gen->command.u.raw64.sg_lst[0].sg_ptr = paddr;
gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len;
gen->command.u.raw64.sg_lst[1].sg_len = 0;
} else {
gen->command.u.raw64.sdata = paddr;
gen->command.u.raw64.sg_ranz = 0;
}
gen->command.u.raw64.sense_data = paddr + gen->data_len;
} else {
if (ha->raw_feat & SCATTER_GATHER) {
gen->command.u.raw.sdata = 0xffffffff;
gen->command.u.raw.sg_ranz = 1;
gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
gen->command.u.raw.sg_lst[0].sg_len = gen->data_len;
gen->command.u.raw.sg_lst[1].sg_len = 0;
} else {
gen->command.u.raw.sdata = paddr;
gen->command.u.raw.sg_ranz = 0;
}
gen->command.u.raw.sense_data = (u32)paddr + gen->data_len;
}
}
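/*
* GDTIOCTL_GENERAL handler: copy the user's command header and payload,
* place the payload in a single coherent DMA buffer, patch the command's
* address fields for the addressed service (IOCTL, cache or raw SCSI),
* execute it synchronously via __gdth_execute() and copy payload and
* status back to user space.
*/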
static int ioc_general(void __user *arg, char *cmnd)
{
gdth_ioctl_general gen;
gdth_ha_str *ha;
char *buf = NULL;
dma_addr_t paddr;
int rval;
if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
return -EFAULT;
ha = gdth_find_ha(gen.ionode);
if (!ha)
return -EFAULT;
if (gen.data_len > INT_MAX)
return -EINVAL;
if (gen.sense_len > INT_MAX)
return -EINVAL;
if (gen.data_len + gen.sense_len > INT_MAX)
return -EINVAL;
if (gen.data_len + gen.sense_len > 0) {
buf = dma_alloc_coherent(&ha->pdev->dev,
gen.data_len + gen.sense_len, &paddr,
GFP_KERNEL);
if (!buf)
return -EFAULT;
rval = -EFAULT;
if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
gen.data_len + gen.sense_len))
goto out_free_buf;
if (gen.command.OpCode == GDT_IOCTL)
gen.command.u.ioctl.p_param = paddr;
else if (gen.command.Service == CACHESERVICE)
gdth_ioc_cacheservice(ha, &gen, paddr);
else if (gen.command.Service == SCSIRAWSERVICE)
gdth_ioc_scsiraw(ha, &gen, paddr);
else
goto out_free_buf;
}
rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout,
&gen.info);
if (rval < 0)
goto out_free_buf;
gen.status = rval;
rval = -EFAULT;
if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
gen.data_len + gen.sense_len))
goto out_free_buf;
if (copy_to_user(arg, &gen,
sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str)))
goto out_free_buf;
rval = 0;
out_free_buf:
if (buf)
dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
buf, paddr);
return rval;
}
static int ioc_hdrlist(void __user *arg, char *cmnd)
{
gdth_ioctl_rescan *rsc;
gdth_cmd_str *cmd;
gdth_ha_str *ha;
u8 i;
int rc = -ENOMEM;
u32 cluster_type = 0;
rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!rsc || !cmd)
goto free_fail;
if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
(NULL == (ha = gdth_find_ha(rsc->ionode)))) {
rc = -EFAULT;
goto free_fail;
}
memset(cmd, 0, sizeof(gdth_cmd_str));
for (i = 0; i < MAX_HDRIVES; ++i) {
if (!ha->hdr[i].present) {
rsc->hdr_list[i].bus = 0xff;
continue;
}
rsc->hdr_list[i].bus = ha->virt_bus;
rsc->hdr_list[i].target = i;
rsc->hdr_list[i].lun = 0;
rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
cmd->Service = CACHESERVICE;
cmd->OpCode = GDT_CLUST_INFO;
if (ha->cache_feat & GDT_64BIT)
cmd->u.cache64.DeviceNo = i;
else
cmd->u.cache.DeviceNo = i;
if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
rsc->hdr_list[i].cluster_type = cluster_type;
}
}
if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
rc = -EFAULT;
else
rc = 0;
free_fail:
kfree(rsc);
kfree(cmd);
return rc;
}
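/*
* GDTIOCTL_RESCAN handler: with flag == 0 the cache service is
* re-initialized to rediscover all host drives, otherwise only the drive
* in rsc->hdr_no is refreshed.  For each drive the size/mapping, device
* type, cluster info and R/W attributes are queried and both ha->hdr[]
* and the list returned to user space are updated.
*/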
static int ioc_rescan(void __user *arg, char *cmnd)
{
gdth_ioctl_rescan *rsc;
gdth_cmd_str *cmd;
u16 i, status, hdr_cnt;
u32 info;
int cyls, hds, secs;
int rc = -ENOMEM;
unsigned long flags;
gdth_ha_str *ha;
rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd || !rsc)
goto free_fail;
if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
(NULL == (ha = gdth_find_ha(rsc->ionode)))) {
rc = -EFAULT;
goto free_fail;
}
memset(cmd, 0, sizeof(gdth_cmd_str));
if (rsc->flag == 0) {
/* old method: re-init. cache service */
cmd->Service = CACHESERVICE;
if (ha->cache_feat & GDT_64BIT) {
cmd->OpCode = GDT_X_INIT_HOST;
cmd->u.cache64.DeviceNo = LINUX_OS;
} else {
cmd->OpCode = GDT_INIT;
cmd->u.cache.DeviceNo = LINUX_OS;
}
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
i = 0;
hdr_cnt = (status == S_OK ? (u16)info : 0);
} else {
i = rsc->hdr_no;
hdr_cnt = i + 1;
}
for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
cmd->Service = CACHESERVICE;
cmd->OpCode = GDT_INFO;
if (ha->cache_feat & GDT_64BIT)
cmd->u.cache64.DeviceNo = i;
else
cmd->u.cache.DeviceNo = i;
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
spin_lock_irqsave(&ha->smp_lock, flags);
rsc->hdr_list[i].bus = ha->virt_bus;
rsc->hdr_list[i].target = i;
rsc->hdr_list[i].lun = 0;
if (status != S_OK) {
ha->hdr[i].present = FALSE;
} else {
ha->hdr[i].present = TRUE;
ha->hdr[i].size = info;
/* evaluate mapping */
ha->hdr[i].size &= ~SECS32;
gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs);
ha->hdr[i].heads = hds;
ha->hdr[i].secs = secs;
/* round size */
ha->hdr[i].size = cyls * hds * secs;
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
if (status != S_OK)
continue;
/* extended info, if GDT_64BIT, for drives > 2 TB */
/* but we need ha->info2, not yet stored in scp->SCp */
/* devtype, cluster info, R/W attribs */
cmd->Service = CACHESERVICE;
cmd->OpCode = GDT_DEVTYPE;
if (ha->cache_feat & GDT_64BIT)
cmd->u.cache64.DeviceNo = i;
else
cmd->u.cache.DeviceNo = i;
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
spin_unlock_irqrestore(&ha->smp_lock, flags);
cmd->Service = CACHESERVICE;
cmd->OpCode = GDT_CLUST_INFO;
if (ha->cache_feat & GDT_64BIT)
cmd->u.cache64.DeviceNo = i;
else
cmd->u.cache.DeviceNo = i;
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[i].cluster_type =
((status == S_OK && !shared_access) ? (u16)info : 0);
spin_unlock_irqrestore(&ha->smp_lock, flags);
rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
cmd->Service = CACHESERVICE;
cmd->OpCode = GDT_RW_ATTRIBS;
if (ha->cache_feat & GDT_64BIT)
cmd->u.cache64.DeviceNo = i;
else
cmd->u.cache.DeviceNo = i;
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
rc = -EFAULT;
else
rc = 0;
free_fail:
kfree(rsc);
kfree(cmd);
return rc;
}
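/*
* Character device ioctl dispatcher: simple queries (controller count,
* driver and OS version, controller type) are answered inline, everything
* that talks to the firmware is delegated to the ioc_* helpers above.
* Serialization is provided by gdth_mutex in gdth_unlocked_ioctl().
*/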
static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
gdth_ha_str *ha;
struct scsi_cmnd *scp;
unsigned long flags;
char cmnd[MAX_COMMAND_SIZE];
void __user *argp = (void __user *)arg;
memset(cmnd, 0xff, 12);
TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));
switch (cmd) {
case GDTIOCTL_CTRCNT:
{
int cnt = gdth_ctr_count;
if (put_user(cnt, (int __user *)argp))
return -EFAULT;
break;
}
case GDTIOCTL_DRVERS:
{
int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
if (put_user(ver, (int __user *)argp))
return -EFAULT;
break;
}
case GDTIOCTL_OSVERS:
{
gdth_ioctl_osvers osv;
osv.version = (u8)(LINUX_VERSION_CODE >> 16);
osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
return -EFAULT;
break;
}
case GDTIOCTL_CTRTYPE:
{
gdth_ioctl_ctrtype ctrt;
if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) ||
(NULL == (ha = gdth_find_ha(ctrt.ionode))))
return -EFAULT;
if (ha->type != GDT_PCIMPR) {
ctrt.type = (u8)((ha->stype<<4) + 6);
} else {
ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
if (ha->stype >= 0x300)
ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
else
ctrt.ext_type = 0x6000 | ha->stype;
}
ctrt.device_id = ha->pdev->device;
ctrt.sub_device_id = ha->pdev->subsystem_device;
ctrt.info = ha->brd_phys;
ctrt.oem_id = ha->oem_id;
if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
return -EFAULT;
break;
}
case GDTIOCTL_GENERAL:
return ioc_general(argp, cmnd);
case GDTIOCTL_EVENT:
return ioc_event(argp);
case GDTIOCTL_LOCKDRV:
return ioc_lockdrv(argp);
case GDTIOCTL_LOCKCHN:
{
gdth_ioctl_lockchn lchn;
u8 i, j;
if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
(NULL == (ha = gdth_find_ha(lchn.ionode))))
return -EFAULT;
i = lchn.channel;
if (i < ha->bus_cnt) {
if (lchn.lock) {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
for (j = 0; j < ha->tid_cnt; ++j)
gdth_wait_completion(ha, i, j);
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
for (j = 0; j < ha->tid_cnt; ++j)
gdth_next(ha);
}
}
break;
}
case GDTIOCTL_RESCAN:
return ioc_rescan(argp, cmnd);
case GDTIOCTL_HDRLIST:
return ioc_hdrlist(argp, cmnd);
case GDTIOCTL_RESET_BUS:
{
gdth_ioctl_reset res;
int rval;
if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) ||
(NULL == (ha = gdth_find_ha(res.ionode))))
return -EFAULT;
scp = kzalloc(sizeof(*scp), GFP_KERNEL);
if (!scp)
return -ENOMEM;
scp->device = ha->sdev;
scp->cmd_len = 12;
scp->device->channel = res.number;
rval = gdth_eh_bus_reset(scp);
res.status = (rval == SUCCESS ? S_OK : S_GENERR);
kfree(scp);
if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset)))
return -EFAULT;
break;
}
case GDTIOCTL_RESET_DRV:
return ioc_resetdrv(argp, cmnd);
default:
break;
}
return 0;
}
static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret;
mutex_lock(&gdth_mutex);
ret = gdth_ioctl(file, cmd, arg);
mutex_unlock(&gdth_mutex);
return ret;
}
/* flush routine */
static void gdth_flush(gdth_ha_str *ha)
{
int i;
gdth_cmd_str gdtcmd;
char cmnd[MAX_COMMAND_SIZE];
memset(cmnd, 0xff, MAX_COMMAND_SIZE);
TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
for (i = 0; i < MAX_HDRIVES; ++i) {
if (ha->hdr[i].present) {
gdtcmd.BoardNode = LOCALBOARD;
gdtcmd.Service = CACHESERVICE;
gdtcmd.OpCode = GDT_FLUSH;
if (ha->cache_feat & GDT_64BIT) {
gdtcmd.u.cache64.DeviceNo = i;
gdtcmd.u.cache64.BlockNo = 1;
gdtcmd.u.cache64.sg_canz = 0;
} else {
gdtcmd.u.cache.DeviceNo = i;
gdtcmd.u.cache.BlockNo = 1;
gdtcmd.u.cache.sg_canz = 0;
}
TRACE2(("gdth_flush(): flush ha %d drive %d\n", ha->hanum, i));
gdth_execute(ha->shost, &gdtcmd, cmnd, 30, NULL);
}
}
}
/* configure lun */
static int gdth_slave_configure(struct scsi_device *sdev)
{
sdev->skip_ms_page_3f = 1;
sdev->skip_ms_page_8 = 1;
return 0;
}
static struct scsi_host_template gdth_template = {
.name = "GDT SCSI Disk Array Controller",
.info = gdth_info,
.queuecommand = gdth_queuecommand,
.eh_bus_reset_handler = gdth_eh_bus_reset,
.slave_configure = gdth_slave_configure,
.bios_param = gdth_bios_param,
.show_info = gdth_show_info,
.write_info = gdth_set_info,
.eh_timed_out = gdth_timed_out,
.proc_name = "gdth",
.can_queue = GDTH_MAXCMDS,
.this_id = -1,
.sg_tablesize = GDTH_MAXSG,
.cmd_per_lun = GDTH_MAXC_P_L,
.unchecked_isa_dma = 1,
.no_write_same = 1,
};
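/*
* Bring up one PCI controller: allocate the Scsi_Host, initialize the
* adapter via gdth_init_pci(), request the shared IRQ, allocate the
* coherent scratch and message buffers, scan for drives, select a 64- or
* 32-bit DMA mask according to the firmware features, and finally
* register and scan the SCSI host.
*/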
static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
{
struct Scsi_Host *shp;
gdth_ha_str *ha;
dma_addr_t scratch_dma_handle = 0;
int error, i;
struct pci_dev *pdev = pcistr->pdev;
*ha_out = NULL;
shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
if (!shp)
return -ENOMEM;
ha = shost_priv(shp);
error = -ENODEV;
if (!gdth_init_pci(pdev, pcistr, ha))
goto out_host_put;
/* controller found and initialized */
printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
pdev->bus->number,
PCI_SLOT(pdev->devfn),
ha->irq);
error = request_irq(ha->irq, gdth_interrupt,
IRQF_SHARED, "gdth", ha);
if (error) {
printk("GDT-PCI: Unable to allocate IRQ\n");
goto out_host_put;
}
shp->unchecked_isa_dma = 0;
shp->irq = ha->irq;
shp->dma_channel = 0xff;
ha->hanum = gdth_ctr_count++;
ha->shost = shp;
ha->pccb = &ha->cmdext;
ha->ccb_phys = 0L;
error = -ENOMEM;
ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH,
&scratch_dma_handle, GFP_KERNEL);
if (!ha->pscratch)
goto out_free_irq;
ha->scratch_phys = scratch_dma_handle;
ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
&scratch_dma_handle, GFP_KERNEL);
if (!ha->pmsg)
goto out_free_pscratch;
ha->msg_phys = scratch_dma_handle;
ha->scratch_busy = FALSE;
ha->req_first = NULL;
ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
if (max_ids > 0 && max_ids < ha->tid_cnt)
ha->tid_cnt = max_ids;
for (i = 0; i < GDTH_MAXCMDS; ++i)
ha->cmd_tab[i].cmnd = UNUSED_CMND;
ha->scan_mode = rescan ? 0x10 : 0;
error = -ENODEV;
if (!gdth_search_drives(ha)) {
printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
goto out_free_pmsg;
}
if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
hdr_channel = ha->bus_cnt;
ha->virt_bus = hdr_channel;
/* 64-bit DMA only supported from FW >= x.43 */
if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
!ha->dma64_support) {
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "GDT-PCI %d: "
"Unable to set 32-bit DMA\n", ha->hanum);
goto out_free_pmsg;
}
} else {
shp->max_cmd_len = 16;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "GDT-PCI %d: "
"Unable to set 64/32-bit DMA\n", ha->hanum);
goto out_free_pmsg;
}
}
shp->max_id = ha->tid_cnt;
shp->max_lun = MAXLUN;
shp->max_channel = ha->bus_cnt;
spin_lock_init(&ha->smp_lock);
gdth_enable_int(ha);
error = scsi_add_host(shp, &pdev->dev);
if (error)
goto out_free_pmsg;
list_add_tail(&ha->list, &gdth_instances);
pci_set_drvdata(ha->pdev, ha);
gdth_timer_init();
scsi_scan_host(shp);
*ha_out = ha;
return 0;
out_free_pmsg:
dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
ha->pmsg, ha->msg_phys);
out_free_pscratch:
dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
ha->pscratch, ha->scratch_phys);
out_free_irq:
free_irq(ha->irq, ha);
gdth_ctr_count--;
out_host_put:
scsi_host_put(shp);
return error;
}
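/*
* Tear-down path: unregister the SCSI host, flush the host drive caches,
* then release the host device, the IRQ, the DMA buffers and the host
* reference.
*/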
static void gdth_remove_one(gdth_ha_str *ha)
{
struct Scsi_Host *shp = ha->shost;
TRACE2(("gdth_remove_one()\n"));
scsi_remove_host(shp);
gdth_flush(ha);
if (ha->sdev) {
scsi_free_host_dev(ha->sdev);
ha->sdev = NULL;
}
if (shp->irq)
free_irq(shp->irq,ha);
if (ha->pscratch)
dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
ha->pscratch, ha->scratch_phys);
if (ha->pmsg)
dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
ha->pmsg, ha->msg_phys);
if (ha->ccb_phys)
dma_unmap_single(&ha->pdev->dev, ha->ccb_phys,
sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL);
scsi_host_put(shp);
}
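/*
* Reboot notifier: flush the write cache of every adapter before the
* system restarts, halts or powers off.
*/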
static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
gdth_ha_str *ha;
TRACE2(("gdth_halt() event %d\n", (int)event));
if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
return NOTIFY_DONE;
list_for_each_entry(ha, &gdth_instances, list)
gdth_flush(ha);
return NOTIFY_OK;
}
static struct notifier_block gdth_notifier = {
.notifier_call = gdth_halt,
};
static int __init gdth_init(void)
{
if (disable) {
printk("GDT-HA: Controller driver disabled from"
" command line !\n");
return 0;
}
printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",
GDTH_VERSION_STR);
/* initializations */
gdth_polling = TRUE;
gdth_clear_events();
timer_setup(&gdth_timer, gdth_timeout, 0);
/* scanning for PCI controllers */
if (pci_register_driver(&gdth_pci_driver)) {
gdth_ha_str *ha;
list_for_each_entry(ha, &gdth_instances, list)
gdth_remove_one(ha);
return -ENODEV;
}
TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
major = register_chrdev(0,"gdth", &gdth_fops);
register_reboot_notifier(&gdth_notifier);
gdth_polling = FALSE;
return 0;
}
static void __exit gdth_exit(void)
{
gdth_ha_str *ha;
unregister_chrdev(major, "gdth");
unregister_reboot_notifier(&gdth_notifier);
#ifdef GDTH_STATISTICS
del_timer_sync(&gdth_timer);
#endif
pci_unregister_driver(&gdth_pci_driver);
list_for_each_entry(ha, &gdth_instances, list)
gdth_remove_one(ha);
}
module_init(gdth_init);
module_exit(gdth_exit);
#ifndef MODULE
__setup("gdth=", option_setup);
#endif