Merge 5.4.133 into android11-5.4-lts

Changes in 5.4.133
  drm/mxsfb: Don't select DRM_KMS_FB_HELPER
  drm/zte: Don't select DRM_KMS_FB_HELPER
  drm/amd/amdgpu/sriov disable all ip hw status by default
  drm/vc4: fix argument ordering in vc4_crtc_get_margins()
  net: pch_gbe: Use proper accessors to BE data in pch_ptp_match()
  drm/amd/display: fix use_max_lb flag for 420 pixel formats
  hugetlb: clear huge pte during flush function on mips platform
  atm: iphase: fix possible use-after-free in ia_module_exit()
  mISDN: fix possible use-after-free in HFC_cleanup()
  atm: nicstar: Fix possible use-after-free in nicstar_cleanup()
  net: Treat __napi_schedule_irqoff() as __napi_schedule() on PREEMPT_RT
  drm/mediatek: Fix PM reference leak in mtk_crtc_ddp_hw_init()
  reiserfs: add check for invalid 1st journal block
  drm/virtio: Fix double free on probe failure
  drm/sched: Avoid data corruptions
  udf: Fix NULL pointer dereference in udf_symlink function
  e100: handle eeprom as little endian
  igb: handle vlan types with checker enabled
  drm/bridge: cdns: Fix PM reference leak in cdns_dsi_transfer()
  clk: renesas: r8a77995: Add ZA2 clock
  clk: tegra: Ensure that PLLU configuration is applied properly
  ipv6: use prandom_u32() for ID generation
  RDMA/cxgb4: Fix missing error code in create_qp()
  dm space maps: don't reset space map allocation cursor when committing
  pinctrl: mcp23s08: fix race condition in irq handler
  ice: set the value of global config lock timeout longer
  virtio_net: Remove BUG() to avoid machine dead
  net: bcmgenet: check return value after calling platform_get_resource()
  net: mvpp2: check return value after calling platform_get_resource()
  net: micrel: check return value after calling platform_get_resource()
  drm/amd/display: Update scaling settings on modeset
  drm/amd/display: Release MST resources on switch from MST to SST
  drm/amd/display: Set DISPCLK_MAX_ERRDET_CYCLES to 7
  drm/amdkfd: use allowed domain for vmbo validation
  fjes: check return value after calling platform_get_resource()
  selinux: use __GFP_NOWARN with GFP_NOWAIT in the AVC
  r8169: avoid link-up interrupt issue on RTL8106e if user enables ASPM
  drm/amd/display: Verify Gamma & Degamma LUT sizes in amdgpu_dm_atomic_check
  xfrm: Fix error reporting in xfrm_state_construct.
  wlcore/wl12xx: Fix wl12xx get_mac error if device is in ELP
  wl1251: Fix possible buffer overflow in wl1251_cmd_scan
  cw1200: add missing MODULE_DEVICE_TABLE
  bpf: Fix up register-based shifts in interpreter to silence KUBSAN
  mt76: mt7615: fix fixed-rate tx status reporting
  net: fix mistake path for netdev_features_strings
  net: sched: fix error return code in tcf_del_walker()
  drm/amdkfd: Walk through list with dqm lock hold
  rtl8xxxu: Fix device info for RTL8192EU devices
  MIPS: add PMD table accounting into MIPS'pmd_alloc_one
  atm: nicstar: use 'dma_free_coherent' instead of 'kfree'
  atm: nicstar: register the interrupt handler in the right place
  vsock: notify server to shutdown when client has pending signal
  RDMA/rxe: Don't overwrite errno from ib_umem_get()
  iwlwifi: mvm: don't change band on bound PHY contexts
  iwlwifi: pcie: free IML DMA memory allocation
  iwlwifi: pcie: fix context info freeing
  sfc: avoid double pci_remove of VFs
  sfc: error code if SRIOV cannot be disabled
  wireless: wext-spy: Fix out-of-bounds warning
  media, bpf: Do not copy more entries than user space requested
  net: ip: avoid OOM kills with large UDP sends over loopback
  RDMA/cma: Fix rdma_resolve_route() memory leak
  Bluetooth: btusb: Fixed too many in-token issue for Mediatek Chip.
  Bluetooth: Fix the HCI to MGMT status conversion table
  Bluetooth: Shutdown controller after workqueues are flushed or cancelled
  Bluetooth: btusb: fix bt fiwmare downloading failure issue for qca btsoc.
  sctp: validate from_addr_param return
  sctp: add size validation when walking chunks
  MIPS: loongsoon64: Reserve memory below starting pfn to prevent Oops
  MIPS: set mips32r5 for virt extensions
  fscrypt: don't ignore minor_hash when hash is 0
  crypto: ccp - Annotate SEV Firmware file names
  perf bench: Fix 2 memory sanitizer warnings
  powerpc/mm: Fix lockup on kernel exec fault
  powerpc/barrier: Avoid collision with clang's __lwsync macro
  drm/amdgpu: Update NV SIMD-per-CU to 2
  drm/radeon: Add the missed drm_gem_object_put() in radeon_user_framebuffer_create()
  drm/rockchip: dsi: remove extra component_del() call
  drm/amd/display: fix incorrrect valid irq check
  pinctrl/amd: Add device HID for new AMD GPIO controller
  drm/amd/display: Reject non-zero src_y and src_x for video planes
  drm/tegra: Don't set allow_fb_modifiers explicitly
  drm/msm/mdp4: Fix modifier support enabling
  drm/arm/malidp: Always list modifiers
  mmc: sdhci: Fix warning message when accessing RPMB in HS400 mode
  mmc: core: clear flags before allowing to retune
  mmc: core: Allow UHS-I voltage switch for SDSC cards if supported
  ata: ahci_sunxi: Disable DIPM
  cpu/hotplug: Cure the cpusets trainwreck
  clocksource/arm_arch_timer: Improve Allwinner A64 timer workaround
  fpga: stratix10-soc: Add missing fpga_mgr_free() call
  MIPS: fix "mipsel-linux-ld: decompress.c:undefined reference to `memmove'"
  ASoC: tegra: Set driver_name=tegra for all machine drivers
  qemu_fw_cfg: Make fw_cfg_rev_attr a proper kobj_attribute
  ipmi/watchdog: Stop watchdog timer when the current action is 'none'
  thermal/drivers/int340x/processor_thermal: Fix tcc setting
  ubifs: Fix races between xattr_{set|get} and listxattr operations
  power: supply: ab8500: Fix an old bug
  nvmem: core: add a missing of_node_put
  extcon: intel-mrfld: Sync hardware and software state on init
  seq_buf: Fix overflow in seq_buf_putmem_hex()
  rq-qos: fix missed wake-ups in rq_qos_throttle try two
  tracing: Simplify & fix saved_tgids logic
  tracing: Resize tgid_map to pid_max, not PID_MAX_DEFAULT
  ipack/carriers/tpci200: Fix a double free in tpci200_pci_probe
  coresight: tmc-etf: Fix global-out-of-bounds in tmc_update_etf_buffer()
  dm btree remove: assign new_root only when removal succeeds
  PCI: Leave Apple Thunderbolt controllers on for s2idle or standby
  PCI: aardvark: Fix checking for PIO Non-posted Request
  PCI: aardvark: Implement workaround for the readback value of VEND_ID
  media: subdev: disallow ioctl for saa6588/davinci
  media: dtv5100: fix control-request directions
  media: zr364xx: fix memory leak in zr364xx_start_readpipe
  media: gspca/sq905: fix control-request direction
  media: gspca/sunplus: fix zero-length control requests
  media: uvcvideo: Fix pixel format change for Elgato Cam Link 4K
  pinctrl: mcp23s08: Fix missing unlock on error in mcp23s08_irq()
  jfs: fix GPF in diFree
  smackfs: restrict bytes count in smk_set_cipso()
  Linux 5.4.133

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4daf813e30444755db3a7d587f8be81ccd2f748b
commit a7e747c026
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 132
+SUBLEVEL = 133
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -5,6 +5,7 @@
  * Very small subset of simple string routines
  */
 
+#include <linux/compiler_attributes.h>
 #include <linux/types.h>
 
 void *memcpy(void *dest, const void *src, size_t n)
@@ -27,3 +28,19 @@ void *memset(void *s, int c, size_t n)
                 ss[i] = c;
         return s;
 }
+
+void * __weak memmove(void *dest, const void *src, size_t n)
+{
+        unsigned int i;
+        const char *s = src;
+        char *d = dest;
+
+        if ((uintptr_t)dest < (uintptr_t)src) {
+                for (i = 0; i < n; i++)
+                        d[i] = s[i];
+        } else {
+                for (i = n; i > 0; i--)
+                        d[i - 1] = s[i - 1];
+        }
+        return dest;
+}
@@ -53,7 +53,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
 {
-        flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
+        /*
+         * clear the huge pte entry firstly, so that the other smp threads will
+         * not get old pte entry after finishing flush_tlb_page and before
+         * setting new huge pte entry
+         */
+        huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+        flush_tlb_page(vma, addr);
 }
 
 #define __HAVE_ARCH_HUGE_PTE_NONE
@@ -2007,7 +2007,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
 ({ int __res;                                          \
         __asm__ __volatile__(                          \
                 ".set\tpush\n\t"                       \
-                ".set\tmips32r2\n\t"                   \
+                ".set\tmips32r5\n\t"                   \
                 _ASM_SET_VIRT                          \
                 "mfgc0\t%0, " #source ", %1\n\t"       \
                 ".set\tpop"                            \
@@ -2020,7 +2020,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
 ({ unsigned long long __res;                           \
         __asm__ __volatile__(                          \
                 ".set\tpush\n\t"                       \
-                ".set\tmips64r2\n\t"                   \
+                ".set\tmips64r5\n\t"                   \
                 _ASM_SET_VIRT                          \
                 "dmfgc0\t%0, " #source ", %1\n\t"      \
                 ".set\tpop"                            \
@@ -2033,7 +2033,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
 do {                                                   \
         __asm__ __volatile__(                          \
                 ".set\tpush\n\t"                       \
-                ".set\tmips32r2\n\t"                   \
+                ".set\tmips32r5\n\t"                   \
                 _ASM_SET_VIRT                          \
                 "mtgc0\t%z0, " #register ", %1\n\t"    \
                 ".set\tpop"                            \
@@ -2045,7 +2045,7 @@ do { \
 do {                                                   \
         __asm__ __volatile__(                          \
                 ".set\tpush\n\t"                       \
-                ".set\tmips64r2\n\t"                   \
+                ".set\tmips64r5\n\t"                   \
                 _ASM_SET_VIRT                          \
                 "dmtgc0\t%z0, " #register ", %1\n\t"   \
                 ".set\tpop"                            \
@@ -62,11 +62,15 @@ do { \
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        pmd_t *pmd;
+        pmd_t *pmd = NULL;
+        struct page *pg;
 
-        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
-        if (pmd)
+        pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
+        if (pg) {
+                pgtable_pmd_page_ctor(pg);
+                pmd = (pmd_t *)page_address(pg);
                 pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+        }
         return pmd;
 }
 
@@ -200,6 +200,9 @@ static void __init node_mem_init(unsigned int node)
                 if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
                         memblock_reserve((node_addrspace_offset | 0xfe000000),
                                          32 << 20);
+
+                /* Reserve pfn range 0~node[0]->node_start_pfn */
+                memblock_reserve(0, PAGE_SIZE * start_pfn);
         }
 }
 
@@ -44,6 +44,8 @@
 # define SMPWMB      eieio
 #endif
 
+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
 #define __lwsync()      __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
 #define dma_rmb()       __lwsync()
 #define dma_wmb()       __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
@@ -204,9 +204,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
 {
         int is_exec = TRAP(regs) == 0x400;
 
-        /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
-        if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
-                                      DSISR_PROTFAULT))) {
+        if (is_exec) {
                 pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
                                     address >= TASK_SIZE ? "exec-protected" : "user",
                                     address,
@@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
         if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
                 return;
 
-        prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
-        has_sleeper = !wq_has_single_sleeper(&rqw->wait);
+        has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
+                                                 TASK_UNINTERRUPTIBLE);
         do {
                 /* The memory barrier in set_task_state saves us here. */
                 if (data.got_token)
@@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
 }
 
 static const struct ata_port_info ahci_sunxi_port_info = {
-        .flags          = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+        .flags          = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
         .pio_mask       = ATA_PIO4,
         .udma_mask      = ATA_UDMA6,
         .port_ops       = &ahci_platform_ops,
@@ -3295,7 +3295,7 @@ static void __exit ia_module_exit(void)
 {
         pci_unregister_driver(&ia_driver);
 
-        del_timer(&ia_timer);
+        del_timer_sync(&ia_timer);
 }
 
 module_init(ia_module_init);
@@ -297,7 +297,7 @@ static void __exit nicstar_cleanup(void)
 {
         XPRINTK("nicstar: nicstar_cleanup() called.\n");
 
-        del_timer(&ns_timer);
+        del_timer_sync(&ns_timer);
 
         pci_unregister_driver(&nicstar_driver);
 
@@ -525,6 +525,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
         /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
         writel(0x00000000, card->membase + VPM);
 
+        card->intcnt = 0;
+        if (request_irq
+            (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
+                pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+                error = 9;
+                ns_init_card_error(card, error);
+                return error;
+        }
+
         /* Initialize TSQ */
         card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
                                            NS_TSQSIZE + NS_TSQ_ALIGNMENT,
@@ -751,15 +760,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
 
         card->efbie = 1;
 
-        card->intcnt = 0;
-        if (request_irq
-            (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
-                printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
-                error = 9;
-                ns_init_card_error(card, error);
-                return error;
-        }
-
         /* Register device */
         card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
                                         -1, NULL);
@@ -837,10 +837,12 @@ static void ns_init_card_error(ns_dev *card, int error)
                 dev_kfree_skb_any(hb);
         }
         if (error >= 12) {
-                kfree(card->rsq.org);
+                dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+                                  card->rsq.org, card->rsq.dma);
         }
         if (error >= 11) {
-                kfree(card->tsq.org);
+                dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+                                  card->tsq.org, card->tsq.dma);
         }
         if (error >= 10) {
                 free_irq(card->pcidev->irq, card);
@@ -2700,11 +2700,6 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
         struct btmtk_wmt_hdr *hdr;
         int err;
 
-        /* Submit control IN URB on demand to process the WMT event */
-        err = btusb_mtk_submit_wmt_recv_urb(hdev);
-        if (err < 0)
-                return err;
-
         /* Send the WMT command and wait until the WMT event returns */
         hlen = sizeof(*hdr) + wmt_params->dlen;
         if (hlen > 255)
@@ -2726,6 +2721,11 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
                 return err;
         }
 
+        /* Submit control IN URB on demand to process the WMT event */
+        err = btusb_mtk_submit_wmt_recv_urb(hdev);
+        if (err < 0)
+                return err;
+
         /* The vendor specific WMT commands are all answered by a vendor
          * specific event and will have the Command Status or Command
          * Complete as with usual HCI command flow control.
@@ -3263,6 +3263,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
                 sent += size;
                 count -= size;
 
+                /* ep2 need time to switch from function acl to function dfu,
+                 * so we add 20ms delay here.
+                 */
+                msleep(20);
+
                 while (count) {
                         size = min_t(size_t, count, QCA_DFU_PACKET_LEN);
 
@@ -366,16 +366,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
         data[0] = 0;
         WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
 
-        if ((ipmi_version_major > 1)
-            || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
-                /* This is an IPMI 1.5-only feature. */
-                data[0] |= WDOG_DONT_STOP_ON_SET;
-        } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
-                /*
-                 * In ipmi 1.0, setting the timer stops the watchdog, we
-                 * need to start it back up again.
-                 */
-                hbnow = 1;
+        if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+                if ((ipmi_version_major > 1) ||
+                    ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+                        /* This is an IPMI 1.5-only feature. */
+                        data[0] |= WDOG_DONT_STOP_ON_SET;
+                } else {
+                        /*
+                         * In ipmi 1.0, setting the timer stops the watchdog, we
+                         * need to start it back up again.
+                         */
+                        hbnow = 1;
+                }
         }
 
         data[1] = 0;
@@ -75,6 +75,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
         DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
 
         /* Core Clock Outputs */
+        DEF_FIXED("za2", R8A77995_CLK_ZA2, CLK_PLL0D3, 2, 1),
         DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1),
         DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1),
         DEF_FIXED("zt", R8A77995_CLK_ZT, CLK_PLL1, 4, 1),
@@ -1089,7 +1089,8 @@ static int clk_pllu_enable(struct clk_hw *hw)
         if (pll->lock)
                 spin_lock_irqsave(pll->lock, flags);
 
-        _clk_pll_enable(hw);
+        if (!clk_pll_is_enabled(hw))
+                _clk_pll_enable(hw);
 
         ret = clk_pll_wait_for_lock(pll);
         if (ret < 0)
@@ -1706,15 +1707,13 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
                 return -EINVAL;
         }
 
-        if (clk_pll_is_enabled(hw))
-                return 0;
-
         input_rate = clk_hw_get_rate(__clk_get_hw(osc));
 
         if (pll->lock)
                 spin_lock_irqsave(pll->lock, flags);
 
-        _clk_pll_enable(hw);
+        if (!clk_pll_is_enabled(hw))
+                _clk_pll_enable(hw);
 
         ret = clk_pll_wait_for_lock(pll);
         if (ret < 0)
@@ -348,7 +348,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
         do {                                                            \
                 _val = read_sysreg(reg);                                \
                 _retries--;                                             \
-        } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries);        \
+        } while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);        \
                                                                         \
         WARN_ON_ONCE(!_retries);                                        \
         _val;                                                           \
@@ -40,6 +40,10 @@ static int psp_probe_timeout = 5;
 module_param(psp_probe_timeout, int, 0644);
 MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
 
+MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
+
 static bool psp_dead;
 static int psp_timeout;
 
@@ -197,6 +197,7 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
         struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
         struct regmap *regmap = pmic->regmap;
         struct mrfld_extcon_data *data;
+        unsigned int status;
         unsigned int id;
         int irq, ret;
 
@@ -244,6 +245,14 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
         /* Get initial state */
         mrfld_extcon_role_detect(data);
 
+        /*
+         * Cached status value is used for cable detection, see comments
+         * in mrfld_extcon_cable_detect(), we need to sync cached value
+         * with a real state of the hardware.
+         */
+        regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
+        data->status = status;
+
         mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
         mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);
 
@@ -296,15 +296,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev)
         return 0;
 }
 
-static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
+                              char *buf)
 {
         return sprintf(buf, "%u\n", fw_cfg_rev);
 }
 
-static const struct {
-        struct attribute attr;
-        ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
-} fw_cfg_rev_attr = {
+static const struct kobj_attribute fw_cfg_rev_attr = {
         .attr = { .name = "rev", .mode = S_IRUSR },
         .show = fw_cfg_showrev,
 };
@@ -476,6 +476,7 @@ static int s10_remove(struct platform_device *pdev)
         struct s10_priv *priv = mgr->priv;
 
         fpga_mgr_unregister(mgr);
+        fpga_mgr_free(mgr);
         stratix10_svc_free_channel(priv->chan);
 
         return 0;
@@ -55,12 +55,6 @@ static struct {
         spinlock_t mem_limit_lock;
 } kfd_mem_limit;
 
-/* Struct used for amdgpu_amdkfd_bo_validate */
-struct amdgpu_vm_parser {
-        uint32_t domain;
-        bool wait;
-};
-
 static const char * const domain_bit_to_string[] = {
         "CPU",
         "GTT",
@@ -293,11 +287,9 @@ validate_fail:
         return ret;
 }
 
-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 {
-        struct amdgpu_vm_parser *p = param;
-
-        return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+        return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
 }
 
 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -311,20 +303,15 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 {
         struct amdgpu_bo *pd = vm->root.base.bo;
         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
-        struct amdgpu_vm_parser param;
         int ret;
 
-        param.domain = AMDGPU_GEM_DOMAIN_VRAM;
-        param.wait = false;
-
-        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
-                                        &param);
+        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
         if (ret) {
                 pr_err("amdgpu: failed to validate PT BOs\n");
                 return ret;
         }
 
-        ret = amdgpu_amdkfd_validate(&param, pd);
+        ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
         if (ret) {
                 pr_err("amdgpu: failed to validate PD\n");
                 return ret;
@@ -2291,7 +2291,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                 AMD_IP_BLOCK_TYPE_IH,
         };
 
-        for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+        for (i = 0; i < adev->num_ip_blocks; i++) {
                 int j;
                 struct amdgpu_ip_block *block;
 
@ -1584,7 +1584,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
|
||||
struct qcm_process_device *qpd)
|
||||
{
|
||||
int retval;
|
||||
struct queue *q, *next;
|
||||
struct queue *q;
|
||||
struct kernel_queue *kq, *kq_next;
|
||||
struct mqd_manager *mqd_mgr;
|
||||
struct device_process_node *cur, *next_dpn;
|
||||
@ -1639,6 +1639,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
|
||||
qpd->reset_wavefronts = false;
|
||||
}
|
||||
|
||||
/* Lastly, free mqd resources.
|
||||
* Do free_mqd() after dqm_unlock to avoid circular locking.
|
||||
*/
|
||||
while (!list_empty(&qpd->queues_list)) {
|
||||
q = list_first_entry(&qpd->queues_list, struct queue, list);
|
||||
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
|
||||
q->properties.type)];
|
||||
list_del(&q->list);
|
||||
qpd->queue_count--;
|
||||
dqm_unlock(dqm);
|
||||
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
|
||||
dqm_lock(dqm);
|
||||
}
|
||||
dqm_unlock(dqm);
|
||||
|
||||
/* Outside the DQM lock because under the DQM lock we can't do
|
||||
@ -1647,17 +1660,6 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
|
||||
if (found)
|
||||
kfd_dec_compute_active(dqm->dev);
|
||||
|
||||
/* Lastly, free mqd resources.
|
||||
* Do free_mqd() after dqm_unlock to avoid circular locking.
|
||||
*/
|
||||
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
|
||||
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
|
||||
q->properties.type)];
|
||||
list_del(&q->list);
|
||||
qpd->queue_count--;
|
||||
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
|
||||
}
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
@ -2632,6 +2632,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
|
||||
scaling_info->src_rect.x = state->src_x >> 16;
|
||||
scaling_info->src_rect.y = state->src_y >> 16;
|
||||
|
||||
/*
|
||||
* For reasons we don't (yet) fully understand a non-zero
|
||||
* src_y coordinate into an NV12 buffer can cause a
|
||||
* system hang. To avoid hangs (and maybe be overly cautious)
|
||||
* let's reject both non-zero src_x and src_y.
|
||||
*
|
||||
* We currently know of only one use-case to reproduce a
|
||||
* scenario with non-zero src_x and src_y for NV12, which
|
||||
* is to gesture the YouTube Android app into full screen
|
||||
* on ChromeOS.
|
||||
*/
|
||||
if (state->fb &&
|
||||
state->fb->format->format == DRM_FORMAT_NV12 &&
|
||||
(scaling_info->src_rect.x != 0 ||
|
||||
scaling_info->src_rect.y != 0))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* For reasons we don't (yet) fully understand a non-zero
|
||||
* src_y coordinate into an NV12 buffer can cause a
|
||||
@ -6832,7 +6849,8 @@ skip_modeset:
|
||||
BUG_ON(dm_new_crtc_state->stream == NULL);
|
||||
|
||||
/* Scaling or underscan settings */
|
||||
if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
|
||||
if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
|
||||
drm_atomic_crtc_needs_modeset(new_crtc_state))
|
||||
update_stream_scaling_settings(
|
||||
&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
|
||||
|
||||
@ -7406,6 +7424,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
|
||||
continue;
|
||||
|
||||
ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
if (!new_crtc_state->enable)
|
||||
continue;
|
||||
|
||||
|
@ -387,6 +387,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
|
||||
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
|
||||
|
||||
void amdgpu_dm_init_color_mod(void);
|
||||
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
|
||||
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
|
||||
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
|
||||
struct dc_plane_state *dc_plane_state);
|
||||
|
@ -277,6 +277,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
|
||||
return res ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
* Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
|
||||
* the expected size.
|
||||
* Returns 0 on success.
|
||||
*/
|
||||
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
|
||||
{
|
||||
const struct drm_color_lut *lut = NULL;
|
||||
uint32_t size = 0;
|
||||
|
||||
lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
|
||||
if (lut && size != MAX_COLOR_LUT_ENTRIES) {
|
||||
DRM_DEBUG_DRIVER(
|
||||
"Invalid Degamma LUT size. Should be %u but got %u.\n",
|
||||
MAX_COLOR_LUT_ENTRIES, size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
|
||||
if (lut && size != MAX_COLOR_LUT_ENTRIES &&
|
||||
size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
|
||||
DRM_DEBUG_DRIVER(
|
||||
"Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
|
||||
MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
|
||||
size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
|
||||
* @crtc: amdgpu_dm crtc state
|
||||
@@ -311,14 +342,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
         bool is_legacy;
         int r;
 
-        degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
-        if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
-                return -EINVAL;
+        r = amdgpu_dm_verify_lut_sizes(&crtc->base);
+        if (r)
+                return r;
 
+        degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
         regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
-        if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
-            regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
-                return -EINVAL;
 
         has_degamma =
                 degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
@ -1284,6 +1284,8 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
|
||||
link->type = dc_connection_single;
|
||||
link->local_sink = link->remote_sinks[0];
|
||||
link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
|
||||
dc_sink_retain(link->local_sink);
|
||||
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
|
||||
} else if (mst_enable == true &&
|
||||
link->type == dc_connection_single &&
|
||||
link->remote_sinks[0] != NULL) {
|
||||
|
@ -484,10 +484,13 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
|
||||
int vtaps_c = scl_data->taps.v_taps_c;
|
||||
int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
|
||||
int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
|
||||
enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
|
||||
|
||||
if (dpp->base.ctx->dc->debug.use_max_lb)
|
||||
return mem_cfg;
|
||||
if (dpp->base.ctx->dc->debug.use_max_lb) {
|
||||
if (scl_data->format == PIXEL_FORMAT_420BPP8
|
||||
|| scl_data->format == PIXEL_FORMAT_420BPP10)
|
||||
return LB_MEMORY_CONFIG_3;
|
||||
return LB_MEMORY_CONFIG_0;
|
||||
}
|
||||
|
||||
dpp->base.caps->dscl_calc_lb_num_partitions(
|
||||
scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
|
||||
|
@ -126,7 +126,7 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
|
||||
REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
|
||||
|
||||
/* This value is dependent on the hardware pipeline delay so set once per SOC */
|
||||
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
|
||||
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
|
||||
}
|
||||
void dcn20_display_init(struct dc *dc)
|
||||
{
|
||||
|
@ -163,7 +163,7 @@ enum irq_type
|
||||
};
|
||||
|
||||
#define DAL_VALID_IRQ_SRC_NUM(src) \
|
||||
((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
|
||||
((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
|
||||
|
||||
/* Number of Page Flip IRQ Sources. */
|
||||
#define DAL_PFLIP_IRQ_SRC_NUM \
|
||||
|
@ -430,7 +430,7 @@ ARRAY_2D_DEPTH = 0x00000001,
|
||||
*/
|
||||
|
||||
typedef enum ENUM_NUM_SIMD_PER_CU {
|
||||
NUM_SIMD_PER_CU = 0x00000004,
|
||||
NUM_SIMD_PER_CU = 0x00000002,
|
||||
} ENUM_NUM_SIMD_PER_CU;
|
||||
|
||||
/*
|
||||
|
@ -922,6 +922,11 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
|
||||
.atomic_disable = malidp_de_plane_disable,
|
||||
};
|
||||
|
||||
static const uint64_t linear_only_modifiers[] = {
|
||||
DRM_FORMAT_MOD_LINEAR,
|
||||
DRM_FORMAT_MOD_INVALID
|
||||
};
|
||||
|
||||
int malidp_de_planes_init(struct drm_device *drm)
|
||||
{
|
||||
struct malidp_drm *malidp = drm->dev_private;
|
||||
@ -985,8 +990,8 @@ int malidp_de_planes_init(struct drm_device *drm)
|
||||
*/
|
||||
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
|
||||
&malidp_de_plane_funcs, formats, n,
|
||||
(id == DE_SMART) ? NULL : modifiers, plane_type,
|
||||
NULL);
|
||||
(id == DE_SMART) ? linear_only_modifiers : modifiers,
|
||||
plane_type, NULL);
|
||||
|
||||
if (ret < 0)
|
||||
goto cleanup;
|
||||
|
@ -1026,7 +1026,7 @@ static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
|
||||
struct mipi_dsi_packet packet;
|
||||
int ret, i, tx_len, rx_len;
|
||||
|
||||
ret = pm_runtime_get_sync(host->dev);
|
||||
ret = pm_runtime_resume_and_get(host->dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -240,7 +240,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
|
||||
drm_connector_list_iter_end(&conn_iter);
|
||||
}
|
||||
|
||||
ret = pm_runtime_get_sync(crtc->dev->dev);
|
||||
ret = pm_runtime_resume_and_get(crtc->dev->dev);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("Failed to enable power domain: %d\n", ret);
|
||||
return ret;
|
||||
|
@ -88,8 +88,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
|
||||
if (mdp4_kms->rev > 1)
|
||||
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
|
||||
|
||||
dev->mode_config.allow_fb_modifiers = true;
|
||||
|
||||
out:
|
||||
pm_runtime_put_sync(dev->dev);
|
||||
|
||||
|
@ -347,6 +347,12 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
|
||||
return mdp4_plane->pipe;
|
||||
}
|
||||
|
||||
static const uint64_t supported_format_modifiers[] = {
|
||||
DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
|
||||
DRM_FORMAT_MOD_LINEAR,
|
||||
DRM_FORMAT_MOD_INVALID
|
||||
};
|
||||
|
||||
/* initialize plane */
|
||||
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
|
||||
enum mdp4_pipe pipe_id, bool private_plane)
|
||||
@ -375,7 +381,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
|
||||
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
|
||||
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
|
||||
mdp4_plane->formats, mdp4_plane->nformats,
|
||||
NULL, type, NULL);
|
||||
supported_format_modifiers, type, NULL);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
|
@ -10,7 +10,6 @@ config DRM_MXSFB
|
||||
depends on COMMON_CLK
|
||||
select DRM_MXS
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_KMS_FB_HELPER
|
||||
select DRM_KMS_CMA_HELPER
|
||||
select DRM_PANEL
|
||||
help
|
||||
|
@ -1333,6 +1333,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
|
||||
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
|
||||
if (obj->import_attach) {
|
||||
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
|
||||
drm_gem_object_put(obj);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
|
@ -231,7 +231,6 @@ struct dw_mipi_dsi_rockchip {
|
||||
struct dw_mipi_dsi *dmd;
|
||||
const struct rockchip_dw_dsi_chip_data *cdata;
|
||||
struct dw_mipi_dsi_plat_data pdata;
|
||||
int devcnt;
|
||||
};
|
||||
|
||||
struct dphy_pll_parameter_map {
|
||||
@ -1001,9 +1000,6 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
|
||||
|
||||
if (dsi->devcnt == 0)
|
||||
component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
|
||||
|
||||
dw_mipi_dsi_remove(dsi->dmd);
|
||||
|
||||
return 0;
|
||||
|
@ -235,11 +235,16 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
|
||||
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
|
||||
{
|
||||
struct drm_sched_job *job;
|
||||
struct dma_fence *f;
|
||||
int r;
|
||||
|
||||
while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
|
||||
struct drm_sched_fence *s_fence = job->s_fence;
|
||||
|
||||
/* Wait for all dependencies to avoid data corruptions */
|
||||
while ((f = job->sched->ops->dependency(job, entity)))
|
||||
dma_fence_wait(f, false);
|
||||
|
||||
drm_sched_fence_scheduled(s_fence);
|
||||
dma_fence_set_error(&s_fence->finished, -ESRCH);
|
||||
|
||||
|
@ -919,6 +919,11 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
|
||||
.atomic_disable = tegra_cursor_atomic_disable,
|
||||
};
|
||||
|
||||
static const uint64_t linear_modifiers[] = {
|
||||
DRM_FORMAT_MOD_LINEAR,
|
||||
DRM_FORMAT_MOD_INVALID
|
||||
};
|
||||
|
||||
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
|
||||
struct tegra_dc *dc)
|
||||
{
|
||||
@ -947,7 +952,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
|
||||
|
||||
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
|
||||
&tegra_plane_funcs, formats,
|
||||
num_formats, NULL,
|
||||
num_formats, linear_modifiers,
|
||||
DRM_PLANE_TYPE_CURSOR, NULL);
|
||||
if (err < 0) {
|
||||
kfree(plane);
|
||||
@ -1065,7 +1070,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
|
||||
|
||||
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
|
||||
&tegra_plane_funcs, formats,
|
||||
num_formats, NULL, type, NULL);
|
||||
num_formats, linear_modifiers,
|
||||
type, NULL);
|
||||
if (err < 0) {
|
||||
kfree(plane);
|
||||
return ERR_PTR(err);
|
||||
|
@ -122,8 +122,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
|
||||
drm->mode_config.max_width = 4096;
|
||||
drm->mode_config.max_height = 4096;
|
||||
|
||||
drm->mode_config.allow_fb_modifiers = true;
|
||||
|
||||
drm->mode_config.normalize_zpos = true;
|
||||
|
||||
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
|
||||
|
@ -750,7 +750,7 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
|
||||
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
|
||||
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
|
||||
void vc4_crtc_get_margins(struct drm_crtc_state *state,
|
||||
unsigned int *right, unsigned int *left,
|
||||
unsigned int *left, unsigned int *right,
|
||||
unsigned int *top, unsigned int *bottom);
|
||||
|
||||
/* vc4_debugfs.c */
|
||||
|
@ -218,6 +218,7 @@ err_ttm:
|
||||
err_vbufs:
|
||||
vgdev->vdev->config->del_vqs(vgdev->vdev);
|
||||
err_vqs:
|
||||
dev->dev_private = NULL;
|
||||
kfree(vgdev);
|
||||
return ret;
|
||||
}
|
||||
|
@ -3,7 +3,6 @@ config DRM_ZTE
|
||||
tristate "DRM Support for ZTE SoCs"
|
||||
depends on DRM && ARCH_ZX
|
||||
select DRM_KMS_CMA_HELPER
|
||||
select DRM_KMS_FB_HELPER
|
||||
select DRM_KMS_HELPER
|
||||
select SND_SOC_HDMI_CODEC if SND_SOC
|
||||
select VIDEOMODE_HELPERS
|
||||
|
@ -528,7 +528,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
|
||||
buf_ptr = buf->data_pages[cur] + offset;
|
||||
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
|
||||
|
||||
if (lost && *barrier) {
|
||||
if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
|
||||
*buf_ptr = *barrier;
|
||||
barrier++;
|
||||
}
|
||||
|
@ -2719,7 +2719,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
|
||||
|
||||
cma_init_resolve_route_work(work, id_priv);
|
||||
|
||||
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
|
||||
if (!route->path_rec)
|
||||
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
|
||||
if (!route->path_rec) {
|
||||
ret = -ENOMEM;
|
||||
goto err1;
|
||||
|
@ -295,6 +295,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
|
||||
if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
|
||||
pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
|
||||
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
|
||||
ret = -EINVAL;
|
||||
goto free_dma;
|
||||
}
|
||||
|
||||
|
@ -173,7 +173,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
|
||||
if (IS_ERR(umem)) {
|
||||
pr_warn("err %d from rxe_umem_get\n",
|
||||
(int)PTR_ERR(umem));
|
||||
err = -EINVAL;
|
||||
err = PTR_ERR(umem);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
|
@ -596,8 +596,11 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
|
||||
|
||||
out_err_bus_register:
|
||||
tpci200_uninstall(tpci200);
|
||||
/* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
|
||||
tpci200->info->cfg_regs = NULL;
|
||||
out_err_install:
|
||||
iounmap(tpci200->info->cfg_regs);
|
||||
if (tpci200->info->cfg_regs)
|
||||
iounmap(tpci200->info->cfg_regs);
|
||||
out_err_ioremap:
|
||||
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
|
||||
out_err_pci_request:
|
||||
|
@ -2341,7 +2341,7 @@ static void __exit
|
||||
HFC_cleanup(void)
|
||||
{
|
||||
if (timer_pending(&hfc_tl))
|
||||
del_timer(&hfc_tl);
|
||||
del_timer_sync(&hfc_tl);
|
||||
|
||||
pci_unregister_driver(&hfc_driver);
|
||||
}
|
||||
|
@ -549,7 +549,8 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
|
||||
delete_at(n, index);
|
||||
}
|
||||
|
||||
*new_root = shadow_root(&spine);
|
||||
if (!r)
|
||||
*new_root = shadow_root(&spine);
|
||||
exit_shadow_spine(&spine);
|
||||
|
||||
return r;
|
||||
|
@ -171,6 +171,14 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
|
||||
* Any block we allocate has to be free in both the old and current ll.
|
||||
*/
|
||||
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
|
||||
if (r == -ENOSPC) {
|
||||
/*
|
||||
* There's no free block between smd->begin and the end of the metadata device.
|
||||
* We search before smd->begin in case something has been freed.
|
||||
*/
|
||||
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b);
|
||||
}
|
||||
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -199,7 +207,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
|
||||
return r;
|
||||
|
||||
memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
|
||||
smd->begin = 0;
|
||||
smd->nr_allocated_this_transaction = 0;
|
||||
|
||||
r = sm_disk_get_nr_free(sm, &nr_free);
|
||||
|
@ -452,6 +452,14 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
|
||||
* Any block we allocate has to be free in both the old and current ll.
|
||||
*/
|
||||
r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
|
||||
if (r == -ENOSPC) {
|
||||
/*
|
||||
* There's no free block between smm->begin and the end of the metadata device.
|
||||
* We search before smm->begin in case something has been freed.
|
||||
*/
|
||||
r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
|
||||
}
|
||||
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -503,7 +511,6 @@ static int sm_metadata_commit(struct dm_space_map *sm)
|
||||
return r;
|
||||
|
||||
memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
|
||||
smm->begin = 0;
|
||||
smm->allocated_this_transaction = 0;
|
||||
|
||||
return 0;
|
||||
|
@ -380,7 +380,7 @@ static void saa6588_configure(struct saa6588 *s)
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
|
||||
static long saa6588_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
|
||||
{
|
||||
struct saa6588 *s = to_saa6588(sd);
|
||||
struct saa6588_command *a = arg;
|
||||
@ -433,7 +433,7 @@ static int saa6588_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
|
||||
/* ----------------------------------------------------------------------- */
|
||||
|
||||
static const struct v4l2_subdev_core_ops saa6588_core_ops = {
|
||||
.ioctl = saa6588_ioctl,
|
||||
.command = saa6588_command,
|
||||
};
|
||||
|
||||
static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = {
|
||||
|
@ -3187,7 +3187,7 @@ static int radio_release(struct file *file)
|
||||
|
||||
btv->radio_user--;
|
||||
|
||||
bttv_call_all(btv, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
|
||||
bttv_call_all(btv, core, command, SAA6588_CMD_CLOSE, &cmd);
|
||||
|
||||
if (btv->radio_user == 0)
|
||||
btv->has_radio_tuner = 0;
|
||||
@ -3268,7 +3268,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
|
||||
cmd.result = -ENODEV;
|
||||
radio_enable(btv);
|
||||
|
||||
bttv_call_all(btv, core, ioctl, SAA6588_CMD_READ, &cmd);
|
||||
bttv_call_all(btv, core, command, SAA6588_CMD_READ, &cmd);
|
||||
|
||||
return cmd.result;
|
||||
}
|
||||
@ -3289,7 +3289,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
|
||||
cmd.instance = file;
|
||||
cmd.event_list = wait;
|
||||
cmd.poll_mask = res;
|
||||
bttv_call_all(btv, core, ioctl, SAA6588_CMD_POLL, &cmd);
|
||||
bttv_call_all(btv, core, command, SAA6588_CMD_POLL, &cmd);
|
||||
|
||||
return cmd.poll_mask;
|
||||
}
|
||||
|
@ -1179,7 +1179,7 @@ static int video_release(struct file *file)
|
||||
|
||||
saa_call_all(dev, tuner, standby);
|
||||
if (vdev->vfl_type == VFL_TYPE_RADIO)
|
||||
saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
|
||||
saa_call_all(dev, core, command, SAA6588_CMD_CLOSE, &cmd);
|
||||
mutex_unlock(&dev->lock);
|
||||
|
||||
return 0;
|
||||
@ -1198,7 +1198,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
|
||||
cmd.result = -ENODEV;
|
||||
|
||||
mutex_lock(&dev->lock);
|
||||
saa_call_all(dev, core, ioctl, SAA6588_CMD_READ, &cmd);
|
||||
saa_call_all(dev, core, command, SAA6588_CMD_READ, &cmd);
|
||||
mutex_unlock(&dev->lock);
|
||||
|
||||
return cmd.result;
|
||||
@ -1214,7 +1214,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
|
||||
cmd.event_list = wait;
|
||||
cmd.poll_mask = 0;
|
||||
mutex_lock(&dev->lock);
|
||||
saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
|
||||
saa_call_all(dev, core, command, SAA6588_CMD_POLL, &cmd);
|
||||
mutex_unlock(&dev->lock);
|
||||
|
||||
return rc | cmd.poll_mask;
|
||||
|
@ -48,7 +48,7 @@ static int venc_is_second_field(struct vpbe_display *disp_dev)
|
||||
|
||||
ret = v4l2_subdev_call(vpbe_dev->venc,
|
||||
core,
|
||||
ioctl,
|
||||
command,
|
||||
VENC_GET_FLD,
|
||||
&val);
|
||||
if (ret < 0) {
|
||||
|
@ -521,9 +521,7 @@ static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static long venc_ioctl(struct v4l2_subdev *sd,
|
||||
unsigned int cmd,
|
||||
void *arg)
|
||||
static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
@ -542,7 +540,7 @@ static long venc_ioctl(struct v4l2_subdev *sd,
|
||||
}
|
||||
|
||||
static const struct v4l2_subdev_core_ops venc_core_ops = {
|
||||
.ioctl = venc_ioctl,
|
||||
.command = venc_command,
|
||||
};
|
||||
|
||||
static const struct v4l2_subdev_video_ops venc_video_ops = {
|
||||
|
@ -331,7 +331,8 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
|
||||
}
|
||||
|
||||
if (attr->query.prog_cnt != 0 && prog_ids && cnt)
|
||||
ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
|
||||
ret = bpf_prog_array_copy_to_user(progs, prog_ids,
|
||||
attr->query.prog_cnt);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&ir_raw_handler_lock);
|
||||
|
@ -26,6 +26,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
|
||||
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
|
||||
{
|
||||
struct dtv5100_state *st = d->priv;
|
||||
unsigned int pipe;
|
||||
u8 request;
|
||||
u8 type;
|
||||
u16 value;
|
||||
@ -34,6 +35,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
|
||||
switch (wlen) {
|
||||
case 1:
|
||||
/* write { reg }, read { value } */
|
||||
pipe = usb_rcvctrlpipe(d->udev, 0);
|
||||
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ :
|
||||
DTV5100_TUNER_READ);
|
||||
type = USB_TYPE_VENDOR | USB_DIR_IN;
|
||||
@ -41,6 +43,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
|
||||
break;
|
||||
case 2:
|
||||
/* write { reg, value } */
|
||||
pipe = usb_sndctrlpipe(d->udev, 0);
|
||||
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_WRITE :
|
||||
DTV5100_TUNER_WRITE);
|
||||
type = USB_TYPE_VENDOR | USB_DIR_OUT;
|
||||
@ -54,7 +57,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
|
||||
|
||||
memcpy(st->data, rbuf, rlen);
|
||||
msleep(1); /* avoid I2C errors */
|
||||
return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
|
||||
return usb_control_msg(d->udev, pipe, request,
|
||||
type, value, index, st->data, rlen,
|
||||
DTV5100_USB_TIMEOUT);
|
||||
}
|
||||
@ -141,7 +144,7 @@ static int dtv5100_probe(struct usb_interface *intf,
|
||||
|
||||
/* initialize non qt1010/zl10353 part? */
|
||||
for (i = 0; dtv5100_init[i].request; i++) {
|
||||
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
|
||||
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
|
||||
dtv5100_init[i].request,
|
||||
USB_TYPE_VENDOR | USB_DIR_OUT,
|
||||
dtv5100_init[i].value,
|
||||
|
@ -116,7 +116,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
|
||||
}
|
||||
|
||||
ret = usb_control_msg(gspca_dev->dev,
|
||||
usb_sndctrlpipe(gspca_dev->dev, 0),
|
||||
usb_rcvctrlpipe(gspca_dev->dev, 0),
|
||||
USB_REQ_SYNCH_FRAME, /* request */
|
||||
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
SQ905_PING, 0, gspca_dev->usb_buf, 1,
|
||||
|
@ -242,6 +242,10 @@ static void reg_r(struct gspca_dev *gspca_dev,
|
||||
gspca_err(gspca_dev, "reg_r: buffer overflow\n");
|
||||
return;
|
||||
}
|
||||
if (len == 0) {
|
||||
gspca_err(gspca_dev, "reg_r: zero-length read\n");
|
||||
return;
|
||||
}
|
||||
if (gspca_dev->usb_err < 0)
|
||||
return;
|
||||
ret = usb_control_msg(gspca_dev->dev,
|
||||
@ -250,7 +254,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
|
||||
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
0, /* value */
|
||||
index,
|
||||
len ? gspca_dev->usb_buf : NULL, len,
|
||||
gspca_dev->usb_buf, len,
|
||||
500);
|
||||
if (ret < 0) {
|
||||
pr_err("reg_r err %d\n", ret);
|
||||
@ -727,7 +731,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
|
||||
case MegaImageVI:
|
||||
reg_w_riv(gspca_dev, 0xf0, 0, 0);
|
||||
spca504B_WaitCmdStatus(gspca_dev);
|
||||
reg_r(gspca_dev, 0xf0, 4, 0);
|
||||
reg_w_riv(gspca_dev, 0xf0, 4, 0);
|
||||
spca504B_WaitCmdStatus(gspca_dev);
|
||||
break;
|
||||
default:
|
||||
|
@ -124,10 +124,37 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
|
||||
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
|
||||
struct uvc_streaming_control *ctrl)
|
||||
{
|
||||
static const struct usb_device_id elgato_cam_link_4k = {
|
||||
USB_DEVICE(0x0fd9, 0x0066)
|
||||
};
|
||||
struct uvc_format *format = NULL;
|
||||
struct uvc_frame *frame = NULL;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* The response of the Elgato Cam Link 4K is incorrect: The second byte
|
||||
* contains bFormatIndex (instead of being the second byte of bmHint).
|
||||
* The first byte is always zero. The third byte is always 1.
|
||||
*
|
||||
* The UVC 1.5 class specification defines the first five bits in the
|
||||
* bmHint bitfield. The remaining bits are reserved and should be zero.
|
||||
* Therefore a valid bmHint will be less than 32.
|
||||
*
|
||||
* Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix.
|
||||
* MCU: 20.02.19, FPGA: 67
|
||||
*/
|
||||
if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) &&
|
||||
ctrl->bmHint > 255) {
|
||||
u8 corrected_format_index = ctrl->bmHint >> 8;
|
||||
|
||||
/* uvc_dbg(stream->dev, VIDEO,
|
||||
"Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n",
|
||||
ctrl->bmHint, ctrl->bFormatIndex,
|
||||
1, corrected_format_index); */
|
||||
ctrl->bmHint = 1;
|
||||
ctrl->bFormatIndex = corrected_format_index;
|
||||
}
|
||||
|
||||
for (i = 0; i < stream->nformats; ++i) {
|
||||
if (stream->format[i].index == ctrl->bFormatIndex) {
|
||||
format = &stream->format[i];
|
||||
|
@ -1037,6 +1037,7 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam)
|
||||
DBG("submitting URB %p\n", pipe_info->stream_urb);
|
||||
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
|
||||
if (retval) {
|
||||
usb_free_urb(pipe_info->stream_urb);
|
||||
printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n");
|
||||
return retval;
|
||||
}
|
||||
|
@ -953,11 +953,14 @@ int mmc_execute_tuning(struct mmc_card *card)
|
||||
|
||||
err = host->ops->execute_tuning(host, opcode);
|
||||
|
||||
if (err)
|
||||
if (err) {
|
||||
pr_err("%s: tuning execution failed: %d\n",
|
||||
mmc_hostname(host), err);
|
||||
else
|
||||
} else {
|
||||
host->retune_now = 0;
|
||||
host->need_retune = 0;
|
||||
mmc_retune_enable(host);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -793,11 +793,13 @@ try_again:
|
||||
return err;
|
||||
|
||||
/*
|
||||
* In case CCS and S18A in the response is set, start Signal Voltage
|
||||
* Switch procedure. SPI mode doesn't support CMD11.
|
||||
* In case the S18A bit is set in the response, let's start the signal
|
||||
* voltage switch procedure. SPI mode doesn't support CMD11.
|
||||
* Note that, according to the spec, the S18A bit is not valid unless
|
||||
* the CCS bit is set as well. We deliberately deviate from the spec in
|
||||
* regards to this, which allows UHS-I to be supported for SDSC cards.
|
||||
*/
|
||||
if (!mmc_host_is_spi(host) && rocr &&
|
||||
((*rocr & 0x41000000) == 0x41000000)) {
|
||||
if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
|
||||
err = mmc_set_uhs_voltage(host, pocr);
|
||||
if (err == -EAGAIN) {
|
||||
retries--;
|
||||
|
@ -1588,6 +1588,10 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
|
||||
u16 preset = 0;
|
||||
|
||||
switch (host->timing) {
|
||||
case MMC_TIMING_MMC_HS:
|
||||
case MMC_TIMING_SD_HS:
|
||||
preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
|
||||
break;
|
||||
case MMC_TIMING_UHS_SDR12:
|
||||
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
|
||||
break;
|
||||
|
@ -261,6 +261,7 @@
|
||||
|
||||
/* 60-FB reserved */
|
||||
|
||||
#define SDHCI_PRESET_FOR_HIGH_SPEED 0x64
|
||||
#define SDHCI_PRESET_FOR_SDR12 0x66
|
||||
#define SDHCI_PRESET_FOR_SDR25 0x68
|
||||
#define SDHCI_PRESET_FOR_SDR50 0x6A
|
||||
|
@ -426,6 +426,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
|
||||
int id, ret;
|
||||
|
||||
pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!pres) {
|
||||
dev_err(&pdev->dev, "Invalid resource\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
memset(&res, 0, sizeof(res));
|
||||
memset(&ppd, 0, sizeof(ppd));
|
||||
|
||||
|
@ -1398,7 +1398,7 @@ static int e100_phy_check_without_mii(struct nic *nic)
|
||||
u8 phy_type;
|
||||
int without_mii;
|
||||
|
||||
phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
|
||||
phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
|
||||
|
||||
switch (phy_type) {
|
||||
case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
|
||||
@ -1518,7 +1518,7 @@ static int e100_phy_init(struct nic *nic)
|
||||
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
|
||||
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
|
||||
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
|
||||
(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
|
||||
(le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
|
||||
/* enable/disable MDI/MDI-X auto-switching. */
|
||||
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
|
||||
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
|
||||
@ -2266,9 +2266,9 @@ static int e100_asf(struct nic *nic)
|
||||
{
|
||||
/* ASF can be enabled from eeprom */
|
||||
return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
|
||||
(nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
|
||||
!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
|
||||
((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
|
||||
(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
|
||||
!(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
|
||||
((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
|
||||
}
|
||||
|
||||
static int e100_up(struct nic *nic)
|
||||
@ -2924,7 +2924,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
/* Wol magic packet can be enabled from eeprom */
|
||||
if ((nic->mac >= mac_82558_D101_A4) &&
|
||||
(nic->eeprom[eeprom_id] & eeprom_id_wol)) {
|
||||
(le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
|
||||
nic->flags |= wol_magic;
|
||||
device_set_wakeup_enable(&pdev->dev, true);
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ enum ice_aq_res_ids {
|
||||
/* FW update timeout definitions are in milliseconds */
|
||||
#define ICE_NVM_TIMEOUT 180000
|
||||
#define ICE_CHANGE_LOCK_TIMEOUT 1000
|
||||
#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
|
||||
#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 5000
|
||||
|
||||
enum ice_aq_res_access_type {
|
||||
ICE_RES_READ = 1,
|
||||
|
@@ -2651,7 +2651,8 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
 		}

 		input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
-		input->filter.vlan_tci = match.key->vlan_priority;
+		input->filter.vlan_tci =
+			(__force __be16)match.key->vlan_priority;
 	}
 }

@@ -8255,7 +8256,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,

 	if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
 	    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
-		vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+		vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
 	else
 		vid = le16_to_cpu(rx_desc->wb.upper.vlan);

@@ -83,14 +83,14 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
 			      struct net_device *netdev,
 			      struct sk_buff *skb,
-			      u32 status, u16 vlan)
+			      u32 status, __le16 vlan)
 {
 	u16 vid;

 	if (status & E1000_RXD_STAT_VP) {
 		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
 		    (status & E1000_RXDEXT_STATERR_LB))
-			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+			vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
 		else
 			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
 		if (test_bit(vid, adapter->active_vlans))
@@ -5740,6 +5740,10 @@ static int mvpp2_probe(struct platform_device *pdev)
 			return PTR_ERR(priv->lms_base);
 	} else {
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!res) {
+			dev_err(&pdev->dev, "Invalid resource\n");
+			return -EINVAL;
+		}
 		if (has_acpi_companion(&pdev->dev)) {
 			/* In case the MDIO memory region is declared in
 			 * the ACPI, it can already appear as 'in-use'
@@ -1136,6 +1136,10 @@ static int ks8842_probe(struct platform_device *pdev)
 	unsigned i;

 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!iomem) {
+		dev_err(&pdev->dev, "Invalid resource\n");
+		return -EINVAL;
+	}
 	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
 		goto err_mem_region;

@@ -107,7 +107,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 {
 	u8 *data = skb->data;
 	unsigned int offset;
-	u16 *hi, *id;
+	u16 hi, id;
 	u32 lo;

 	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
@@ -118,14 +118,11 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
 		return 0;

-	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
-	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+	hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
+	lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
+	id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);

-	memcpy(&lo, &hi[1], sizeof(lo));
-
-	return (uid_hi == *hi &&
-		uid_lo == lo &&
-		seqid == *id);
+	return (uid_hi == hi && uid_lo == lo && seqid == id);
 }

 static void
@@ -135,7 +132,6 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 	struct pci_dev *pdev;
 	u64 ns;
 	u32 hi, lo, val;
-	u16 uid, seq;

 	if (!adapter->hwts_rx_en)
 		return;
@@ -151,10 +147,7 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 	lo = pch_src_uuid_lo_read(pdev);
 	hi = pch_src_uuid_hi_read(pdev);

-	uid = hi & 0xffff;
-	seq = (hi >> 16) & 0xffff;
-
-	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
+	if (!pch_ptp_match(skb, hi, lo, hi >> 16))
 		goto out;

 	ns = pch_rx_snap_read(pdev);
@@ -5190,7 +5190,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);

 	rtl_pcie_state_l2l3_disable(tp);
-	rtl_hw_aspm_clkreq_enable(tp, true);
 }

 DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
@@ -403,12 +403,17 @@ fail1:
 	return rc;
 }

+/* Disable SRIOV and remove VFs
+ * If some VFs are attached to a guest (using Xen, only) nothing is
+ * done if force=false, and vports are freed if force=true (for the non
+ * attachedc ones, only) but SRIOV is not disabled and VFs are not
+ * removed in either case.
+ */
 static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
 {
 	struct pci_dev *dev = efx->pci_dev;
-	unsigned int vfs_assigned = 0;
-
-	vfs_assigned = pci_vfs_assigned(dev);
+	unsigned int vfs_assigned = pci_vfs_assigned(dev);
+	int rc = 0;

 	if (vfs_assigned && !force) {
 		netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -418,10 +423,12 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)

 	if (!vfs_assigned)
 		pci_disable_sriov(dev);
+	else
+		rc = -EBUSY;

 	efx_ef10_sriov_free_vf_vswitching(efx);
 	efx->vf_count = 0;
-	return 0;
+	return rc;
 }

 int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
@@ -440,7 +447,6 @@ int efx_ef10_sriov_init(struct efx_nic *efx)
 void efx_ef10_sriov_fini(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	unsigned int i;
 	int rc;

 	if (!nic_data->vf) {
@@ -450,14 +456,7 @@ void efx_ef10_sriov_fini(struct efx_nic *efx)
 		return;
 	}

-	/* Remove any VFs in the host */
-	for (i = 0; i < efx->vf_count; ++i) {
-		struct efx_nic *vf_efx = nic_data->vf[i].efx;
-
-		if (vf_efx)
-			vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
-	}
-
+	/* Disable SRIOV and remove any VFs in the host */
 	rc = efx_ef10_pci_sriov_disable(efx, true);
 	if (rc)
 		netif_dbg(efx, drv, efx->net_dev,
@@ -1262,6 +1262,10 @@ static int fjes_probe(struct platform_device *plat_dev)
 	adapter->interrupt_watch_enable = false;

 	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -EINVAL;
+		goto err_free_control_wq;
+	}
 	hw->hw_res.start = res->start;
 	hw->hw_res.size = resource_size(res);
 	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
@@ -1548,7 +1548,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
 				    virtio_is_little_endian(vi->vdev), false,
 				    0))
-		BUG();
+		return -EPROTO;

 	if (vi->mergeable_rx_bufs)
 		hdr->num_buffers = 0;
@@ -3725,6 +3725,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct cfg80211_chan_def chandef;
 	struct iwl_mvm_phy_ctxt *phy_ctxt;
+	bool band_change_removal;
 	int ret, i;

 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
@@ -3794,19 +3795,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);

 	/*
-	 * Change the PHY context configuration as it is currently referenced
-	 * only by the P2P Device MAC
+	 * Check if the remain-on-channel is on a different band and that
+	 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+	 * so, we'll need to release and then re-configure here, since we
+	 * must not remove a PHY context that's part of a binding.
 	 */
-	if (mvmvif->phy_ctxt->ref == 1) {
+	band_change_removal =
+		fw_has_capa(&mvm->fw->ucode_capa,
+			    IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+		mvmvif->phy_ctxt->channel->band != chandef.chan->band;
+
+	if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
+		/*
+		 * Change the PHY context configuration as it is currently
+		 * referenced only by the P2P Device MAC (and we can modify it)
+		 */
 		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
 					       &chandef, 1, 1);
 		if (ret)
 			goto out_unlock;
 	} else {
 		/*
-		 * The PHY context is shared with other MACs. Need to remove the
-		 * P2P Device from the binding, allocate an new PHY context and
-		 * create a new binding
+		 * The PHY context is shared with other MACs (or we're trying to
+		 * switch bands), so remove the P2P Device from the binding,
+		 * allocate an new PHY context and create a new binding.
 		 */
 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
 		if (!phy_ctxt) {
@@ -63,7 +63,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 	struct iwl_prph_scratch *prph_scratch;
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
 	struct iwl_prph_info *prph_info;
-	void *iml_img;
 	u32 control_flags = 0;
 	int ret;
 	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -162,14 +161,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 	trans_pcie->prph_scratch = prph_scratch;

 	/* Allocate IML */
-	iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
-				     &trans_pcie->iml_dma_addr, GFP_KERNEL);
-	if (!iml_img) {
+	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+					     &trans_pcie->iml_dma_addr,
+					     GFP_KERNEL);
+	if (!trans_pcie->iml) {
 		ret = -ENOMEM;
 		goto err_free_ctxt_info;
 	}

-	memcpy(iml_img, trans->iml, trans->iml_len);
+	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

 	iwl_enable_fw_load_int_ctx_info(trans);

@@ -242,6 +242,11 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
 	trans_pcie->ctxt_info_dma_addr = 0;
 	trans_pcie->ctxt_info_gen3 = NULL;

+	dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+			  trans_pcie->iml_dma_addr);
+	trans_pcie->iml_dma_addr = 0;
+	trans_pcie->iml = NULL;
+
 	iwl_pcie_ctxt_info_free_fw_img(trans);

 	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
@@ -475,6 +475,8 @@ struct cont_rec {
  *	Context information addresses will be taken from here.
  *	This is driver's local copy for keeping track of size and
  *	count for allocating and freeing the memory.
+ * @iml: image loader image virtual address
+ * @iml_dma_addr: image loader image DMA address
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
  * @scd_bc_tbls: pointer to the byte count table of the scheduler
@@ -522,6 +524,7 @@ struct iwl_trans_pcie {
 	};
 	struct iwl_prph_info *prph_info;
 	struct iwl_prph_scratch *prph_scratch;
+	void *iml;
 	dma_addr_t ctxt_info_dma_addr;
 	dma_addr_t prph_info_dma_addr;
 	dma_addr_t prph_scratch_dma_addr;
@@ -269,7 +269,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 	/* now that we got alive we can free the fw image & the context info.
 	 * paging memory cannot be freed included since FW will still use it
 	 */
-	iwl_pcie_ctxt_info_free(trans);
+	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+		iwl_pcie_ctxt_info_free(trans);

 	/*
 	 * Re-enable all the interrupts, including the RF-Kill one, now that
@@ -840,22 +840,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
 	int first_idx = 0, last_idx;
 	int i, idx, count;
 	bool fixed_rate, ack_timeout;
-	bool probe, ampdu, cck = false;
+	bool ampdu, cck = false;
 	bool rs_idx;
 	u32 rate_set_tsf;
 	u32 final_rate, final_rate_flags, final_nss, txs;

-	fixed_rate = info->status.rates[0].count;
-	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-
 	txs = le32_to_cpu(txs_data[1]);
-	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+	ampdu = txs & MT_TXS1_AMPDU;

 	txs = le32_to_cpu(txs_data[3]);
 	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
 	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

 	txs = le32_to_cpu(txs_data[0]);
+	fixed_rate = txs & MT_TXS0_FIXED_RATE;
 	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
 	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

@@ -877,7 +875,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,

 	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY);

-	if (fixed_rate && !probe) {
+	if (fixed_rate) {
 		info->status.rates[0].count = count;
 		i = 0;
 		goto out;
@@ -853,15 +853,10 @@ struct rtl8192eu_efuse {
 	u8 usb_optional_function;
 	u8 res9[2];
 	u8 mac_addr[ETH_ALEN]; /* 0xd7 */
-	u8 res10[2];
-	u8 vendor_name[7];
-	u8 res11[2];
-	u8 device_name[0x0b]; /* 0xe8 */
-	u8 res12[2];
-	u8 serial[0x0b]; /* 0xf5 */
-	u8 res13[0x30];
+	u8 device_info[80];
+	u8 res11[3];
 	u8 unknown[0x0d]; /* 0x130 */
-	u8 res14[0xc3];
+	u8 res12[0xc3];
 };

 struct rtl8xxxu_reg8val {
@@ -554,9 +554,43 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
 	}
 }

+static void rtl8192eu_log_next_device_info(struct rtl8xxxu_priv *priv,
+					   char *record_name,
+					   char *device_info,
+					   unsigned int *record_offset)
+{
+	char *record = device_info + *record_offset;
+
+	/* A record is [ total length | 0x03 | value ] */
+	unsigned char l = record[0];
+
+	/*
+	 * The whole device info section seems to be 80 characters, make sure
+	 * we don't read further.
+	 */
+	if (*record_offset + l > 80) {
+		dev_warn(&priv->udev->dev,
+			 "invalid record length %d while parsing \"%s\" at offset %u.\n",
+			 l, record_name, *record_offset);
+		return;
+	}
+
+	if (l >= 2) {
+		char value[80];
+
+		memcpy(value, &record[2], l - 2);
+		value[l - 2] = '\0';
+		dev_info(&priv->udev->dev, "%s: %s\n", record_name, value);
+		*record_offset = *record_offset + l;
+	} else {
+		dev_info(&priv->udev->dev, "%s not available.\n", record_name);
+	}
+}
+
 static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
 {
 	struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+	unsigned int record_offset;
 	int i;

 	if (efuse->rtl_id != cpu_to_le16(0x8129))
@@ -604,12 +638,25 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
 	priv->has_xtalk = 1;
 	priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;

-	dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
-	dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
-	if (memchr_inv(efuse->serial, 0xff, 11))
-		dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
-	else
-		dev_info(&priv->udev->dev, "Serial not available.\n");
+	/*
+	 * device_info section seems to be laid out as records
+	 * [ total length | 0x03 | value ] so:
+	 * - vendor length + 2
+	 * - 0x03
+	 * - vendor string (not null terminated)
+	 * - product length + 2
+	 * - 0x03
+	 * - product string (not null terminated)
+	 * Then there is one or 2 0x00 on all the 4 devices I own or found
+	 * dumped online.
+	 * As previous version of the code handled an optional serial
+	 * string, I now assume there may be a third record if the
+	 * length is not 0.
+	 */
+	record_offset = 0;
+	rtl8192eu_log_next_device_info(priv, "Vendor", efuse->device_info, &record_offset);
+	rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
+	rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);

 	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
 		unsigned char *raw = priv->efuse_wifi.raw;
@@ -60,6 +60,7 @@ static const struct sdio_device_id cw1200_sdio_ids[] = {
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
 	{ /* end: all zeroes */ },
 };
+MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids);

 /* hwbus_ops implemetation */

@@ -466,9 +466,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
 		cmd->channels[i].channel = channels[i]->hw_value;
 	}

-	cmd->params.ssid_len = ssid_len;
-	if (ssid)
-		memcpy(cmd->params.ssid, ssid, ssid_len);
+	if (ssid) {
+		int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN);
+
+		cmd->params.ssid_len = len;
+		memcpy(cmd->params.ssid, ssid, len);
+	}

 	ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
 	if (ret < 0) {
@@ -1503,6 +1503,13 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl)
 	u32 mac1, mac2;
 	int ret;

+	/* Device may be in ELP from the bootloader or kexec */
+	ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+	if (ret < 0)
+		goto out;
+
+	usleep_range(500000, 700000);
+
 	ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
 	if (ret < 0)
 		goto out;
@@ -318,15 +318,17 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
 			continue;
 		if (len < 2 * sizeof(u32)) {
 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+			of_node_put(child);
 			return -EINVAL;
 		}

 		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
-		if (!cell)
+		if (!cell) {
+			of_node_put(child);
 			return -ENOMEM;
+		}

 		cell->nvmem = nvmem;
-		cell->np = of_node_get(child);
 		cell->offset = be32_to_cpup(addr++);
 		cell->bytes = be32_to_cpup(addr);
 		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -347,11 +349,12 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
 				cell->name, nvmem->stride);
 			/* Cells already added will be freed later. */
 			kfree_const(cell->name);
-			of_node_put(cell->np);
 			kfree(cell);
+			of_node_put(child);
 			return -EINVAL;
 		}

+		cell->np = of_node_get(child);
 		nvmem_cell_add(cell);
 	}

@@ -61,7 +61,7 @@
 #define PIO_COMPLETION_STATUS_UR 1
 #define PIO_COMPLETION_STATUS_CRS 2
 #define PIO_COMPLETION_STATUS_CA 4
-#define PIO_NON_POSTED_REQ BIT(0)
+#define PIO_NON_POSTED_REQ BIT(10)
 #define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
 #define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
 #define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -127,6 +127,7 @@
 #define LTSSM_MASK 0x3f
 #define LTSSM_L0 0x10
 #define RC_BAR_CONFIG 0x300
+#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)

 /* PCIe core controller registers */
 #define CTRL_CORE_BASE_ADDR 0x18000
@@ -268,6 +269,16 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	reg |= (IS_RC_MSK << IS_RC_SHIFT);
 	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

+	/*
+	 * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
+	 * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
+	 * id in high 16 bits. Updating this register changes readback value of
+	 * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
+	 * for erratum 4.1: "The value of device and vendor ID is incorrect".
+	 */
+	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+	advk_writel(pcie, reg, VENDOR_ID_REG);
+
 	/* Set Advanced Error Capabilities and Control PF0 register */
 	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
 		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
@@ -27,6 +27,7 @@
 #include <linux/nvme.h>
 #include <linux/platform_data/x86/apple.h>
 #include <linux/pm_runtime.h>
+#include <linux/suspend.h>
 #include <linux/switchtec.h>
 #include <asm/dma.h> /* isa_dma_bridge_buggy */
 #include "pci.h"
@@ -3667,6 +3668,16 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
 		return;
 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
 		return;
+
+	/*
+	 * SXIO/SXFP/SXLF turns off power to the Thunderbolt controller.
+	 * We don't know how to turn it back on again, but firmware does,
+	 * so we can only use SXIO/SXFP/SXLF if we're suspending via
+	 * firmware.
+	 */
+	if (!pm_suspend_via_firmware())
+		return;
+
 	bridge = ACPI_HANDLE(&dev->dev);
 	if (!bridge)
 		return;
@@ -958,6 +958,7 @@ static int amd_gpio_remove(struct platform_device *pdev)
 static const struct acpi_device_id amd_gpio_acpi_match[] = {
 	{ "AMD0030", 0 },
 	{ "AMDI0030", 0},
+	{ "AMDI0031", 0},
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);
@@ -459,6 +459,11 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
 	if (mcp_read(mcp, MCP_INTF, &intf))
 		goto unlock;

+	if (intf == 0) {
+		/* There is no interrupt pending */
+		goto unlock;
+	}
+
 	if (mcp_read(mcp, MCP_INTCAP, &intcap))
 		goto unlock;

@@ -476,11 +481,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
 	mcp->cached_gpio = gpio;
 	mutex_unlock(&mcp->lock);

-	if (intf == 0) {
-		/* There is no interrupt pending */
-		return IRQ_HANDLED;
-	}
-
 	dev_dbg(mcp->chip.parent,
 		"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
 		intcap, intf, gpio_orig, gpio);
@@ -150,24 +150,27 @@ static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
 	if (err)
 		return err;

-	val = (val >> 24) & 0xff;
+	val = (val >> 24) & 0x3f;
 	return sprintf(buf, "%d\n", (int)val);
 }

-static int tcc_offset_update(int tcc)
+static int tcc_offset_update(unsigned int tcc)
 {
 	u64 val;
 	int err;

-	if (!tcc)
+	if (tcc > 63)
 		return -EINVAL;

 	err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
 	if (err)
 		return err;

-	val &= ~GENMASK_ULL(31, 24);
-	val |= (tcc & 0xff) << 24;
+	if (val & BIT(31))
+		return -EPERM;
+
+	val &= ~GENMASK_ULL(29, 24);
+	val |= (tcc & 0x3f) << 24;

 	err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
 	if (err)
@@ -176,14 +179,15 @@ static int tcc_offset_update(int tcc)
 	return 0;
 }

-static int tcc_offset_save;
+static unsigned int tcc_offset_save;

 static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
 				struct device_attribute *attr, const char *buf,
 				size_t count)
 {
+	unsigned int tcc;
 	u64 val;
-	int tcc, err;
+	int err;

 	err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
 	if (err)
@@ -192,7 +196,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
 	if (!(val & BIT(30)))
 		return -EACCES;

-	if (kstrtoint(buf, 0, &tcc))
+	if (kstrtouint(buf, 0, &tcc))
 		return -EINVAL;

 	err = tcc_offset_update(tcc);
@@ -379,13 +379,8 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
 		     offsetof(struct fscrypt_nokey_name, sha256));
 	BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);

-	if (hash) {
-		nokey_name.dirhash[0] = hash;
-		nokey_name.dirhash[1] = minor_hash;
-	} else {
-		nokey_name.dirhash[0] = 0;
-		nokey_name.dirhash[1] = 0;
-	}
+	nokey_name.dirhash[0] = hash;
+	nokey_name.dirhash[1] = minor_hash;
 	if (iname->len <= sizeof(nokey_name.bytes)) {
 		memcpy(nokey_name.bytes, iname->name, iname->len);
 		size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
@@ -151,7 +151,8 @@ void jfs_evict_inode(struct inode *inode)
 			if (test_cflag(COMMIT_Freewmap, inode))
 				jfs_free_zero_link(inode);

-			diFree(inode);
+			if (JFS_SBI(inode->i_sb)->ipimap)
+				diFree(inode);

 			/*
 			 * Free the inode from the quota allocation.