Merge 5.4.156 into android11-5.4-lts

Changes in 5.4.156
	parisc: math-emu: Fix fall-through warnings
	net: switchdev: do not propagate bridge updates across bridges
	tee: optee: Fix missing devices unregister during optee_remove
	ARM: dts: at91: sama5d2_som1_ek: disable ISC node by default
	xtensa: xtfpga: use CONFIG_USE_OF instead of CONFIG_OF
	xtensa: xtfpga: Try software restart before simulating CPU reset
	NFSD: Keep existing listeners on portlist error
	dma-debug: fix sg checks in debug_dma_map_sg()
	ASoC: wm8960: Fix clock configuration on slave mode
	netfilter: ipvs: make global sysctl readonly in non-init netns
	lan78xx: select CRC32
	net: dsa: lantiq_gswip: fix register definition
	NIOS2: irqflags: rename a redefined register name
	net: hns3: reset DWRR of unused tc to zero
	net: hns3: add limit ets dwrr bandwidth cannot be 0
	net: hns3: disable sriov before unload hclge layer
	net: stmmac: Fix E2E delay mechanism
	net: enetc: fix ethtool counter name for PM0_TERR
	can: rcar_can: fix suspend/resume
	can: peak_usb: pcan_usb_fd_decode_status(): fix back to ERROR_ACTIVE state notification
	can: peak_pci: peak_pci_remove(): fix UAF
	can: j1939: j1939_tp_rxtimer(): fix errant alert in j1939_tp_rxtimer
	can: j1939: j1939_netdev_start(): fix UAF for rx_kref of j1939_priv
	can: j1939: j1939_xtp_rx_dat_one(): cancel session if receive TP.DT with error length
	can: j1939: j1939_xtp_rx_rts_session_new(): abort TP less than 9 bytes
	ceph: fix handling of "meta" errors
	ocfs2: fix data corruption after conversion from inline format
	ocfs2: mount fails with buffer overflow in strlen
	elfcore: correct reference to CONFIG_UML
	vfs: check fd has read access in kernel_read_file_from_fd()
	ALSA: usb-audio: Provide quirk for Sennheiser GSP670 Headset
	ALSA: hda/realtek: Add quirk for Clevo PC50HS
	ASoC: DAPM: Fix missing kctl change notifications
	audit: fix possible null-pointer dereference in audit_filter_rules
	powerpc64/idle: Fix SP offsets when saving GPRs
	KVM: PPC: Book3S HV: Fix stack handling in idle_kvm_start_guest()
	KVM: PPC: Book3S HV: Make idle_kvm_start_guest() return 0 if it went to guest
	powerpc/idle: Don't corrupt back chain when going idle
	mm, slub: fix mismatch between reconstructed freelist depth and cnt
	mm, slub: fix potential memoryleak in kmem_cache_open()
	nfc: nci: fix the UAF of rf_conn_info object
	isdn: cpai: check ctr->cnr to avoid array index out of bound
	netfilter: Kconfig: use 'default y' instead of 'm' for bool config option
	selftests: netfilter: remove stray bash debug line
	gcc-plugins/structleak: add makefile var for disabling structleak
	btrfs: deal with errors when checking if a dir entry exists during log replay
	net: stmmac: add support for dwmac 3.40a
	ARM: dts: spear3xx: Fix gmac node
	isdn: mISDN: Fix sleeping function called from invalid context
	platform/x86: intel_scu_ipc: Update timeout value in comment
	ALSA: hda: avoid write to STATESTS if controller is in reset
	Input: snvs_pwrkey - add clk handling
	scsi: core: Fix shost->cmd_per_lun calculation in scsi_add_host_with_dma()
	net: mdiobus: Fix memory leak in __mdiobus_register
	tracing: Have all levels of checks prevent recursion
	ARM: 9122/1: select HAVE_FUTEX_CMPXCHG
	pinctrl: stm32: use valid pin identifier in stm32_pinctrl_resume()
	Linux 5.4.156

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ieddfb50beffee79c5ff9e9fc1d3241aa754929d0
Greg Kroah-Hartman 2021-10-27 10:11:04 +02:00
commit 5f1f361447
65 changed files with 490 additions and 279 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 155
SUBLEVEL = 156
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -85,6 +85,7 @@ config ARM
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA


@ -69,7 +69,6 @@
isc: isc@f0008000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
status = "okay";
};
qspi1: spi@f0024000 {


@ -47,7 +47,7 @@
};
gmac: eth@e0800000 {
compatible = "st,spear600-gmac";
compatible = "snps,dwmac-3.40a";
reg = <0xe0800000 0x8000>;
interrupts = <23 22>;
interrupt-names = "macirq", "eth_wake_irq";


@ -9,7 +9,7 @@
static inline unsigned long arch_local_save_flags(void)
{
return RDCTL(CTL_STATUS);
return RDCTL(CTL_FSTATUS);
}
/*
@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
WRCTL(CTL_STATUS, flags);
WRCTL(CTL_FSTATUS, flags);
}
static inline void arch_local_irq_disable(void)


@ -11,7 +11,7 @@
#endif
/* control register numbers */
#define CTL_STATUS 0
#define CTL_FSTATUS 0
#define CTL_ESTATUS 1
#define CTL_BSTATUS 2
#define CTL_IENABLE 3


@ -310,12 +310,15 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1];
return(NOEXCEPTION);
}
BUG();
case 3: /* FABS */
switch (fmt) {
case 2: /* illegal */
@ -325,13 +328,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and clear sign bit */
fpregs[t] = fpregs[r1] & 0x7fffffff;
return(NOEXCEPTION);
}
BUG();
case 6: /* FNEG */
switch (fmt) {
case 2: /* illegal */
@ -341,13 +347,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and invert sign bit */
fpregs[t] = fpregs[r1] ^ 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 7: /* FNEGABS */
switch (fmt) {
case 2: /* illegal */
@ -357,13 +366,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and set sign bit */
fpregs[t] = fpregs[r1] | 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 4: /* FSQRT */
switch (fmt) {
case 0:
@ -376,6 +388,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 5: /* FRND */
switch (fmt) {
case 0:
@ -389,7 +402,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(MAJOR_0C_EXCP);
}
} /* end of switch (subop) */
BUG();
case 1: /* class 1 */
df = extru(ir,fpdfpos,2); /* get dest format */
if ((df & 2) || (fmt & 2)) {
@ -419,6 +432,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* dbl/dbl */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FCNVXF */
switch(fmt) {
case 0: /* sgl/sgl */
@ -434,6 +448,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 2: /* FCNVFX */
switch(fmt) {
case 0: /* sgl/sgl */
@ -449,6 +464,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 3: /* FCNVFXT */
switch(fmt) {
case 0: /* sgl/sgl */
@ -464,6 +480,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 5: /* FCNVUF (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@ -479,6 +496,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 6: /* FCNVFU (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@ -494,6 +512,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 7: /* FCNVFUT (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@ -509,10 +528,11 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 4: /* undefined */
return(MAJOR_0C_EXCP);
} /* end of switch subop */
BUG();
case 2: /* class 2 */
fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int);
@ -590,6 +610,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FTEST */
switch (fmt) {
case 0:
@ -609,8 +630,10 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3:
return(MAJOR_0C_EXCP);
}
BUG();
} /* end of switch subop */
} /* end of else for PA1.0 & PA1.1 */
BUG();
case 3: /* class 3 */
r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int);
if (r2 == 0)
@ -633,6 +656,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FSUB */
switch (fmt) {
case 0:
@ -645,6 +669,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 2: /* FMPY */
switch (fmt) {
case 0:
@ -657,6 +682,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 3: /* FDIV */
switch (fmt) {
case 0:
@ -669,6 +695,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 4: /* FREM */
switch (fmt) {
case 0:
@ -681,6 +708,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
} /* end of class 3 switch */
} /* end of switch(class) */
@ -736,10 +764,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1];
return(NOEXCEPTION);
}
BUG();
case 3: /* FABS */
switch (fmt) {
case 2:
@ -747,10 +777,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] & 0x7fffffff;
return(NOEXCEPTION);
}
BUG();
case 6: /* FNEG */
switch (fmt) {
case 2:
@ -758,10 +790,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] ^ 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 7: /* FNEGABS */
switch (fmt) {
case 2:
@ -769,10 +803,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] | 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 4: /* FSQRT */
switch (fmt) {
case 0:
@ -785,6 +821,7 @@ u_int fpregs[];
case 3:
return(MAJOR_0E_EXCP);
}
BUG();
case 5: /* FRMD */
switch (fmt) {
case 0:
@ -798,7 +835,7 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
}
} /* end of switch (subop */
BUG();
case 1: /* class 1 */
df = extru(ir,fpdfpos,2); /* get dest format */
/*
@ -826,6 +863,7 @@ u_int fpregs[];
case 3: /* dbl/dbl */
return(MAJOR_0E_EXCP);
}
BUG();
case 1: /* FCNVXF */
switch(fmt) {
case 0: /* sgl/sgl */
@ -841,6 +879,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 2: /* FCNVFX */
switch(fmt) {
case 0: /* sgl/sgl */
@ -856,6 +895,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 3: /* FCNVFXT */
switch(fmt) {
case 0: /* sgl/sgl */
@ -871,6 +911,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 5: /* FCNVUF (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@ -886,6 +927,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 6: /* FCNVFU (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@ -901,6 +943,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 7: /* FCNVFUT (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@ -916,9 +959,11 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 4: /* undefined */
return(MAJOR_0C_EXCP);
} /* end of switch subop */
BUG();
case 2: /* class 2 */
/*
* Be careful out there.
@ -994,6 +1039,7 @@ u_int fpregs[];
}
} /* end of switch subop */
} /* end of else for PA1.0 & PA1.1 */
BUG();
case 3: /* class 3 */
/*
* Be careful out there.
@ -1026,6 +1072,7 @@ u_int fpregs[];
return(dbl_fadd(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 1: /* FSUB */
switch (fmt) {
case 0:
@ -1035,6 +1082,7 @@ u_int fpregs[];
return(dbl_fsub(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 2: /* FMPY or XMPYU */
/*
* check for integer multiply (x bit set)
@ -1071,6 +1119,7 @@ u_int fpregs[];
&fpregs[r2],&fpregs[t],status));
}
}
BUG();
case 3: /* FDIV */
switch (fmt) {
case 0:
@ -1080,6 +1129,7 @@ u_int fpregs[];
return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 4: /* FREM */
switch (fmt) {
case 0:


@ -50,28 +50,32 @@ _GLOBAL(isa300_idle_stop_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
/* use stack red zone rather than a new frame for saving regs */
std r2,-8*0(r1)
std r14,-8*1(r1)
std r15,-8*2(r1)
std r16,-8*3(r1)
std r17,-8*4(r1)
std r18,-8*5(r1)
std r19,-8*6(r1)
std r20,-8*7(r1)
std r21,-8*8(r1)
std r22,-8*9(r1)
std r23,-8*10(r1)
std r24,-8*11(r1)
std r25,-8*12(r1)
std r26,-8*13(r1)
std r27,-8*14(r1)
std r28,-8*15(r1)
std r29,-8*16(r1)
std r30,-8*17(r1)
std r31,-8*18(r1)
std r4,-8*19(r1)
std r5,-8*20(r1)
/*
* Use the stack red zone rather than a new frame for saving regs since
* in the case of no GPR loss the wakeup code branches directly back to
* the caller without deallocating the stack frame first.
*/
std r2,-8*1(r1)
std r14,-8*2(r1)
std r15,-8*3(r1)
std r16,-8*4(r1)
std r17,-8*5(r1)
std r18,-8*6(r1)
std r19,-8*7(r1)
std r20,-8*8(r1)
std r21,-8*9(r1)
std r22,-8*10(r1)
std r23,-8*11(r1)
std r24,-8*12(r1)
std r25,-8*13(r1)
std r26,-8*14(r1)
std r27,-8*15(r1)
std r28,-8*16(r1)
std r29,-8*17(r1)
std r30,-8*18(r1)
std r31,-8*19(r1)
std r4,-8*20(r1)
std r5,-8*21(r1)
/* 168 bytes */
PPC_STOP
b . /* catch bugs */
@ -87,8 +91,8 @@ _GLOBAL(isa300_idle_stop_mayloss)
*/
_GLOBAL(idle_return_gpr_loss)
ld r1,PACAR1(r13)
ld r4,-8*19(r1)
ld r5,-8*20(r1)
ld r4,-8*20(r1)
ld r5,-8*21(r1)
mtlr r4
mtcr r5
/*
@ -96,38 +100,40 @@ _GLOBAL(idle_return_gpr_loss)
* from PACATOC. This could be avoided for that less common case
* if KVM saved its r2.
*/
ld r2,-8*0(r1)
ld r14,-8*1(r1)
ld r15,-8*2(r1)
ld r16,-8*3(r1)
ld r17,-8*4(r1)
ld r18,-8*5(r1)
ld r19,-8*6(r1)
ld r20,-8*7(r1)
ld r21,-8*8(r1)
ld r22,-8*9(r1)
ld r23,-8*10(r1)
ld r24,-8*11(r1)
ld r25,-8*12(r1)
ld r26,-8*13(r1)
ld r27,-8*14(r1)
ld r28,-8*15(r1)
ld r29,-8*16(r1)
ld r30,-8*17(r1)
ld r31,-8*18(r1)
ld r2,-8*1(r1)
ld r14,-8*2(r1)
ld r15,-8*3(r1)
ld r16,-8*4(r1)
ld r17,-8*5(r1)
ld r18,-8*6(r1)
ld r19,-8*7(r1)
ld r20,-8*8(r1)
ld r21,-8*9(r1)
ld r22,-8*10(r1)
ld r23,-8*11(r1)
ld r24,-8*12(r1)
ld r25,-8*13(r1)
ld r26,-8*14(r1)
ld r27,-8*15(r1)
ld r28,-8*16(r1)
ld r29,-8*17(r1)
ld r30,-8*18(r1)
ld r31,-8*19(r1)
blr
/*
* This is the sequence required to execute idle instructions, as
* specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
*
* The 0(r1) slot is used to save r2 in isa206, so use that here.
* We have to store a GPR somewhere, ptesync, then reload it, and create
* a false dependency on the result of the load. It doesn't matter which
* GPR we store, or where we store it. We have already stored r2 to the
* stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
*/
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
/* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
std r2,0(r1); \
std r2,-8(r1); \
ptesync; \
ld r2,0(r1); \
ld r2,-8(r1); \
236: cmpd cr0,r2,r2; \
bne 236b; \
IDLE_INST; \
@ -152,28 +158,32 @@ _GLOBAL(isa206_idle_insn_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
/* use stack red zone rather than a new frame for saving regs */
std r2,-8*0(r1)
std r14,-8*1(r1)
std r15,-8*2(r1)
std r16,-8*3(r1)
std r17,-8*4(r1)
std r18,-8*5(r1)
std r19,-8*6(r1)
std r20,-8*7(r1)
std r21,-8*8(r1)
std r22,-8*9(r1)
std r23,-8*10(r1)
std r24,-8*11(r1)
std r25,-8*12(r1)
std r26,-8*13(r1)
std r27,-8*14(r1)
std r28,-8*15(r1)
std r29,-8*16(r1)
std r30,-8*17(r1)
std r31,-8*18(r1)
std r4,-8*19(r1)
std r5,-8*20(r1)
/*
* Use the stack red zone rather than a new frame for saving regs since
* in the case of no GPR loss the wakeup code branches directly back to
* the caller without deallocating the stack frame first.
*/
std r2,-8*1(r1)
std r14,-8*2(r1)
std r15,-8*3(r1)
std r16,-8*4(r1)
std r17,-8*5(r1)
std r18,-8*6(r1)
std r19,-8*7(r1)
std r20,-8*8(r1)
std r21,-8*9(r1)
std r22,-8*10(r1)
std r23,-8*11(r1)
std r24,-8*12(r1)
std r25,-8*13(r1)
std r26,-8*14(r1)
std r27,-8*15(r1)
std r28,-8*16(r1)
std r29,-8*17(r1)
std r30,-8*18(r1)
std r31,-8*19(r1)
std r4,-8*20(r1)
std r5,-8*21(r1)
cmpwi r3,PNV_THREAD_NAP
bne 1f
IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)


@ -292,13 +292,16 @@ kvm_novcpu_exit:
* r3 contains the SRR1 wakeup value, SRR1 is trashed.
*/
_GLOBAL(idle_kvm_start_guest)
ld r4,PACAEMERGSP(r13)
mfcr r5
mflr r0
std r1,0(r4)
std r5,8(r4)
std r0,16(r4)
subi r1,r4,STACK_FRAME_OVERHEAD
std r5, 8(r1) // Save CR in caller's frame
std r0, 16(r1) // Save LR in caller's frame
// Create frame on emergency stack
ld r4, PACAEMERGSP(r13)
stdu r1, -SWITCH_FRAME_SIZE(r4)
// Switch to new frame on emergency stack
mr r1, r4
std r3, 32(r1) // Save SRR1 wakeup value
SAVE_NVGPRS(r1)
/*
@ -350,6 +353,10 @@ kvm_unsplit_wakeup:
kvm_secondary_got_guest:
// About to go to guest, clear saved SRR1
li r0, 0
std r0, 32(r1)
/* Set HSTATE_DSCR(r13) to something sensible */
ld r6, PACA_DSCR_DEFAULT(r13)
std r6, HSTATE_DSCR(r13)
@ -441,13 +448,12 @@ kvm_no_guest:
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
/* set up r3 for return */
mfspr r3,SPRN_SRR1
// Return SRR1 wakeup value, or 0 if we went into the guest
ld r3, 32(r1)
REST_NVGPRS(r1)
addi r1, r1, STACK_FRAME_OVERHEAD
ld r0, 16(r1)
ld r5, 8(r1)
ld r1, 0(r1)
ld r1, 0(r1) // Switch back to caller stack
ld r0, 16(r1) // Reload LR
ld r5, 8(r1) // Reload CR
mtlr r0
mtcr r5
blr


@ -50,8 +50,12 @@ void platform_power_off(void)
void platform_restart(void)
{
/* Flush and reset the mmu, simulate a processor reset, and
* jump to the reset vector. */
/* Try software reset first. */
WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
/* If software reset did not work, flush and reset the mmu,
* simulate a processor reset, and jump to the reset vector.
*/
cpu_reset();
/* control never gets here */
}
@ -81,7 +85,7 @@ void __init platform_calibrate_ccount(void)
#endif
#ifdef CONFIG_OF
#ifdef CONFIG_USE_OF
static void __init xtfpga_clk_setup(struct device_node *np)
{
@ -299,4 +303,4 @@ static int __init xtavnet_init(void)
*/
arch_initcall(xtavnet_init);
#endif /* CONFIG_OF */
#endif /* CONFIG_USE_OF */


@ -3,6 +3,7 @@
// Driver for the IMX SNVS ON/OFF Power Key
// Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@ -81,6 +82,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static void imx_snvs_pwrkey_disable_clk(void *data)
{
clk_disable_unprepare(data);
}
static void imx_snvs_pwrkey_act(void *pdata)
{
struct pwrkey_drv_data *pd = pdata;
@ -93,6 +99,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
struct pwrkey_drv_data *pdata = NULL;
struct input_dev *input = NULL;
struct device_node *np;
struct clk *clk;
int error;
/* Get SNVS register Page */
@ -115,6 +122,28 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
}
clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
return PTR_ERR(clk);
}
error = clk_prepare_enable(clk);
if (error) {
dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
ERR_PTR(error));
return error;
}
error = devm_add_action_or_reset(&pdev->dev,
imx_snvs_pwrkey_disable_clk, clk);
if (error) {
dev_err(&pdev->dev,
"Failed to register clock cleanup handler (%pe)\n",
ERR_PTR(error));
return error;
}
pdata->wakeup = of_property_read_bool(np, "wakeup-source");
pdata->irq = platform_get_irq(pdev, 0);


@ -565,6 +565,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
ctr_down(ctr, CAPI_CTR_DETACHED);
if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
err = -EINVAL;
goto unlock_out;
}
if (capi_controller[ctr->cnr - 1] != ctr) {
err = -EINVAL;
goto unlock_out;


@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
nj_disable_hwirq(card);
mode_tiger(&card->bc[0], ISDN_P_NONE);
mode_tiger(&card->bc[1], ISDN_P_NONE);
card->isac.release(&card->isac);
spin_unlock_irqrestore(&card->lock, flags);
card->isac.release(&card->isac);
release_region(card->base, card->base_s);
card->base_s = 0;
}


@ -848,10 +848,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
struct rcar_can_priv *priv = netdev_priv(ndev);
u16 ctlr;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
}
if (!netif_running(ndev))
return 0;
netif_stop_queue(ndev);
netif_device_detach(ndev);
ctlr = readw(&priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_CANM_HALT;
writew(ctlr, &priv->regs->ctlr);
@ -870,6 +872,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
u16 ctlr;
int err;
if (!netif_running(ndev))
return 0;
err = clk_enable(priv->clk);
if (err) {
netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@ -883,10 +888,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
netif_device_attach(ndev);
netif_start_queue(ndev);
}
netif_device_attach(ndev);
netif_start_queue(ndev);
return 0;
}


@ -731,16 +731,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
struct net_device *prev_dev = chan->prev_dev;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
/* do that only for first channel */
if (!prev_dev && chan->pciec_card)
peak_pciec_remove(chan->pciec_card);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
dev = prev_dev;
if (!dev) {
/* do that only for first channel */
if (chan->pciec_card)
peak_pciec_remove(chan->pciec_card);
if (!dev)
break;
}
priv = netdev_priv(dev);
chan = priv->priv;
}


@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
new_state = CAN_STATE_ERROR_WARNING;
} else {
/* no error bit (so, no error skb, back to active state) */
dev->can.state = CAN_STATE_ERROR_ACTIVE;
/* back to (or still in) ERROR_ACTIVE state */
new_state = CAN_STATE_ERROR_ACTIVE;
pdev->bec.txerr = 0;
pdev->bec.rxerr = 0;
return 0;
}
/* state hasn't changed */


@ -229,7 +229,7 @@
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02


@ -155,7 +155,7 @@ static const struct {
{ ENETC_PM0_TFRM, "MAC tx frames" },
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
{ ENETC_PM0_TERR, "MAC tx frames" },
{ ENETC_PM0_TERR, "MAC tx frame errors" },
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },


@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
static LIST_HEAD(hnae3_client_list);
static LIST_HEAD(hnae3_ae_dev_list);
void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
{
const struct pci_device_id *pci_id;
struct hnae3_ae_dev *ae_dev;
if (!ae_algo)
return;
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!pci_id)
continue;
if (IS_ENABLED(CONFIG_PCI_IOV))
pci_disable_sriov(ae_dev->pdev);
}
}
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
/* we are keeping things simple and using single lock for all the
* list. This is a non-critical code so other updations, if happen
* in parallel, can wait.


@ -666,6 +666,7 @@ struct hnae3_handle {
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);


@ -132,6 +132,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
/* The hardware will switch to sp mode if bandwidth is
* 0, so limit ets bandwidth must be greater than 0.
*/
if (!ets->tc_tx_bw[i]) {
dev_err(&hdev->pdev->dev,
"tc%u ets bw cannot be 0\n", i);
return -EINVAL;
}
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;


@ -10274,6 +10274,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);


@ -633,6 +633,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
for (; k < HNAE3_MAX_TC; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
}
}


@ -71,6 +71,7 @@ err_remove_config_dt:
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.40a"},
{ .compatible = "snps,dwmac-3.50a"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},


@ -604,7 +604,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id != DWMAC_CORE_5_10)
if (priv->synopsys_id < DWMAC_CORE_4_10)
ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;


@ -505,6 +505,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->pmt = 1;
}
if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
plat->has_gmac = 1;
plat->enh_desc = 1;
plat->tx_coe = 1;
plat->bugged_jumbo = 1;
plat->pmt = 1;
}
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
of_device_is_compatible(np, "snps,dwmac-4.20a")) {


@ -395,6 +395,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
put_device(&bus->dev);
return -EINVAL;
}


@ -117,6 +117,7 @@ config USB_LAN78XX
select PHYLIB
select MICROCHIP_PHY
select FIXED_PHY
select CRC32
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.


@ -1554,8 +1554,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
struct stm32_pinctrl_group *g = pctl->groups;
int i;
for (i = g->pin; i < g->pin + pctl->ngroups; i++)
stm32_pinctrl_restore_gpio_regs(pctl, i);
for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
return 0;
}


@ -181,7 +181,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
return 0;
}
/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;


@ -219,7 +219,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
error = scsi_init_sense_cache(shost);


@ -582,6 +582,9 @@ static struct optee *optee_probe(struct device_node *np)
if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pool = optee_config_dyn_shm();
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
/*
* If dynamic shared memory is not available or failed - try static one
*/


@ -65,6 +65,13 @@ static int get_devices(struct tee_context *ctx, u32 session,
return 0;
}
static void optee_release_device(struct device *dev)
{
struct tee_client_device *optee_device = to_tee_client_device(dev);
kfree(optee_device);
}
static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
{
struct tee_client_device *optee_device = NULL;
@ -75,6 +82,7 @@ static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
return -ENOMEM;
optee_device->dev.bus = &tee_bus_type;
optee_device->dev.release = optee_release_device;
dev_set_name(&optee_device->dev, "optee-clnt%u", device_id);
uuid_copy(&optee_device->id.uuid, device_uuid);
@ -158,3 +166,17 @@ out_ctx:
return rc;
}
static int __optee_unregister_device(struct device *dev, void *data)
{
if (!strncmp(dev_name(dev), "optee-clnt", strlen("optee-clnt")))
device_unregister(dev);
return 0;
}
void optee_unregister_devices(void)
{
bus_for_each_dev(&tee_bus_type, NULL, NULL,
__optee_unregister_device);
}


@ -175,6 +175,7 @@ void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
size_t page_offset);
int optee_enumerate_devices(void);
void optee_unregister_devices(void);
/*
* Small helpers


@ -900,9 +900,11 @@ out:
}
/*
* helper function to see if a given name and sequence number found
* in an inode back reference are already in a directory and correctly
* point to this inode
* See if a given name and sequence number found in an inode back reference are
* already in a directory and correctly point to this inode.
*
* Returns: < 0 on error, 0 if the directory entry does not exists and 1 if it
* exists.
*/
static noinline int inode_in_dir(struct btrfs_root *root,
struct btrfs_path *path,
@ -911,29 +913,35 @@ static noinline int inode_in_dir(struct btrfs_root *root,
{
struct btrfs_dir_item *di;
struct btrfs_key location;
int match = 0;
int ret = 0;
di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
index, name, name_len, 0);
if (di && !IS_ERR(di)) {
if (IS_ERR(di)) {
if (PTR_ERR(di) != -ENOENT)
ret = PTR_ERR(di);
goto out;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
} else
} else {
goto out;
btrfs_release_path(path);
}
btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
if (di && !IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
} else
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto out;
match = 1;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid == objectid)
ret = 1;
}
out:
btrfs_release_path(path);
return match;
return ret;
}
/*
@ -1500,10 +1508,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
/* if we already have a perfect match, we're done */
if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
btrfs_ino(BTRFS_I(inode)), ref_index,
name, namelen)) {
ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
btrfs_ino(BTRFS_I(inode)), ref_index,
name, namelen);
if (ret < 0) {
goto out;
} else if (ret == 0) {
/*
* look for a conflicting back reference in the
* metadata. if we find one we have to unlink that name
@ -1561,6 +1571,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
btrfs_update_inode(trans, root, inode);
}
/* Else, ret == 1, we already have a perfect match, we're done. */
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
kfree(name);


@ -2249,7 +2249,6 @@ static int unsafe_request_wait(struct inode *inode)
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 flush_tid;
@ -2280,14 +2279,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (err < 0)
ret = err;
if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
spin_lock(&file->f_lock);
err = errseq_check_and_advance(&ci->i_meta_err,
&fi->meta_err);
spin_unlock(&file->f_lock);
if (err < 0)
ret = err;
}
err = file_check_and_advance_wb_err(file);
if (err < 0)
ret = err;
out:
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
return ret;


@ -234,7 +234,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
fi->fmode = fmode;
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
fi->meta_err = errseq_sample(&ci->i_meta_err);
fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
return 0;


@ -515,8 +515,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ceph_fscache_inode_init(ci);
ci->i_meta_err = 0;
return &ci->vfs_inode;
}


@ -1272,7 +1272,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
{
struct ceph_mds_request *req;
struct rb_node *p;
struct ceph_inode_info *ci;
dout("cleanup_session_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
@ -1281,16 +1280,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request, r_unsafe_item);
pr_warn_ratelimited(" dropping unsafe request %llu\n",
req->r_tid);
if (req->r_target_inode) {
/* dropping unsafe change of inode's attributes */
ci = ceph_inode(req->r_target_inode);
errseq_set(&ci->i_meta_err, -EIO);
}
if (req->r_unsafe_dir) {
/* dropping unsafe directory operation */
ci = ceph_inode(req->r_unsafe_dir);
errseq_set(&ci->i_meta_err, -EIO);
}
if (req->r_target_inode)
mapping_set_error(req->r_target_inode->i_mapping, -EIO);
if (req->r_unsafe_dir)
mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
__unregister_request(mdsc, req);
}
/* zero r_attempts, so kick_requests() will re-send requests */
@ -1436,7 +1429,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_unlock(&mdsc->cap_dirty_lock);
if (dirty_dropped) {
errseq_set(&ci->i_meta_err, -EIO);
mapping_set_error(inode->i_mapping, -EIO);
if (ci->i_wrbuffer_ref_head == 0 &&
ci->i_wr_ref == 0 &&


@ -402,8 +402,6 @@ struct ceph_inode_info {
struct fscache_cookie *fscache;
u32 i_fscache_gen;
#endif
errseq_t i_meta_err;
struct inode vfs_inode; /* at end */
};
@ -712,7 +710,6 @@ struct ceph_file_info {
spinlock_t rw_contexts_lock;
struct list_head rw_contexts;
errseq_t meta_err;
u32 filp_gen;
atomic_t num_locks;
};


@ -988,7 +988,7 @@ int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
struct fd f = fdget(fd);
int ret = -EBADF;
if (!f.file)
if (!f.file || !(f.file->f_mode & FMODE_READ))
goto out;
ret = kernel_read_file(f.file, buf, size, max_size, id);


@ -792,7 +792,10 @@ out_close:
svc_xprt_put(xprt);
}
out_err:
nfsd_destroy(net);
if (!list_empty(&nn->nfsd_serv->sv_permsocks))
nn->nfsd_serv->sv_nrthreads--;
else
nfsd_destroy(net);
return err;
}


@ -7048,7 +7048,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
int ret, i, has_data, num_pages = 0;
int ret, has_data, num_pages = 0;
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
@ -7057,26 +7057,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL;
struct page **pages = NULL;
loff_t end = osb->s_clustersize;
struct page *page = NULL;
struct ocfs2_extent_tree et;
int did_quota = 0;
has_data = i_size_read(inode) ? 1 : 0;
if (has_data) {
pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
return ret;
}
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (ret) {
mlog_errno(ret);
goto free_pages;
goto out;
}
}
@ -7096,7 +7087,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
if (has_data) {
unsigned int page_end;
unsigned int page_end = min_t(unsigned, PAGE_SIZE,
osb->s_clustersize);
u64 phys;
ret = dquot_alloc_space_nodirty(inode,
@ -7120,15 +7112,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
*/
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
/*
* Non sparse file systems zero on extend, so no need
* to do that now.
*/
if (!ocfs2_sparse_alloc(osb) &&
PAGE_SIZE < osb->s_clustersize)
end = PAGE_SIZE;
ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
&num_pages);
if (ret) {
mlog_errno(ret);
need_free = 1;
@ -7139,20 +7124,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* This should populate the 1st page for us and mark
* it up to date.
*/
ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
ret = ocfs2_read_inline_data(inode, page, di_bh);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
page_end = PAGE_SIZE;
if (PAGE_SIZE > osb->s_clustersize)
page_end = osb->s_clustersize;
for (i = 0; i < num_pages; i++)
ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
pages[i], i > 0, &phys);
ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
&phys);
}
spin_lock(&oi->ip_lock);
@ -7183,8 +7163,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
out_unlock:
if (pages)
ocfs2_unlock_and_free_pages(pages, num_pages);
if (page)
ocfs2_unlock_and_free_pages(&page, num_pages);
out_commit:
if (ret < 0 && did_quota)
@ -7208,8 +7188,6 @@ out_commit:
out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
free_pages:
kfree(pages);
return ret;
}


@ -2150,11 +2150,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
if (ocfs2_clusterinfo_valid(osb)) {
/*
* ci_stack and ci_cluster in ocfs2_cluster_info may not be null
* terminated, so make sure no overflow happens here by using
* memcpy. Destination strings will always be null terminated
* because osb is allocated using kzalloc.
*/
osb->osb_stackflags =
OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
strlcpy(osb->osb_cluster_stack,
memcpy(osb->osb_cluster_stack,
OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
OCFS2_STACK_LABEL_LEN + 1);
OCFS2_STACK_LABEL_LEN);
if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
mlog(ML_ERROR,
"couldn't mount because of an invalid "
@ -2163,9 +2169,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = -EINVAL;
goto bail;
}
strlcpy(osb->osb_cluster_name,
memcpy(osb->osb_cluster_name,
OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
OCFS2_CLUSTER_NAME_LEN + 1);
OCFS2_CLUSTER_NAME_LEN);
} else {
/* The empty string is identical with classic tools that
* don't know about s_cluster_info. */


@ -58,7 +58,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
}
#endif
#if defined(CONFIG_UM) || defined(CONFIG_IA64)
#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its


@ -624,7 +624,7 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
break;
case AUDIT_SADDR_FAM:
if (ctx->sockaddr)
if (ctx && ctx->sockaddr)
result = audit_comparator(ctx->sockaddr->ss_family,
f->op, f->val);
break;


@ -1354,6 +1354,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (unlikely(dma_debug_disabled()))
return;
for_each_sg(sg, s, nents, i) {
check_for_stack(dev, sg_page(s), s->offset);
if (!PageHighMem(sg_page(s)))
check_for_illegal_area(dev, sg_virt(s), s->length);
}
for_each_sg(sg, s, mapped_ents, i) {
entry = dma_entry_alloc();
if (!entry)
@ -1369,12 +1375,6 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
entry->sg_call_ents = nents;
entry->sg_mapped_ents = mapped_ents;
check_for_stack(dev, sg_page(s), s->offset);
if (!PageHighMem(sg_page(s))) {
check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
}
check_sg_segment(dev, s);
add_dma_entry(entry);


@ -6336,7 +6336,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op;
int bit;
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0)
return;
@ -6411,7 +6411,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
{
int bit;
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0)
return;


@ -518,23 +518,8 @@ struct tracer {
* When function tracing occurs, the following steps are made:
* If arch does not support a ftrace feature:
* call internal function (uses INTERNAL bits) which calls...
* If callback is registered to the "global" list, the list
* function is called and recursion checks the GLOBAL bits.
* then this function calls...
* The function callback, which can use the FTRACE bits to
* check for recursion.
*
* Now if the arch does not suppport a feature, and it calls
* the global list function which calls the ftrace callback
* all three of these steps will do a recursion protection.
* There's no reason to do one if the previous caller already
* did. The recursion that we are protecting against will
* go through the same steps again.
*
* To prevent the multiple recursion checks, if a recursion
* bit is set that is higher than the MAX bit of the current
* check, then we know that the check was made by the previous
* caller, and we can skip the current check.
*/
enum {
TRACE_BUFFER_BIT,
@ -547,12 +532,14 @@ enum {
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
TRACE_FTRACE_TRANSITION_BIT,
/* INTERNAL_BITs must be greater than FTRACE_BITs */
/* Internal use recursion bits */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
TRACE_INTERNAL_TRANSITION_BIT,
TRACE_BRANCH_BIT,
/*
@ -592,12 +579,6 @@ enum {
* function is called to clear it.
*/
TRACE_GRAPH_NOTRACE_BIT,
/*
* When transitioning between context, the preempt_count() may
* not be correct. Allow for a single recursion to cover this case.
*/
TRACE_TRANSITION_BIT,
};
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
@ -617,12 +598,18 @@ enum {
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_START TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
enum {
TRACE_CTX_NMI,
TRACE_CTX_IRQ,
TRACE_CTX_SOFTIRQ,
TRACE_CTX_NORMAL,
TRACE_CTX_TRANSITION,
};
static __always_inline int trace_get_context_bit(void)
{
@ -630,59 +617,48 @@ static __always_inline int trace_get_context_bit(void)
if (in_interrupt()) {
if (in_nmi())
bit = 0;
bit = TRACE_CTX_NMI;
else if (in_irq())
bit = 1;
bit = TRACE_CTX_IRQ;
else
bit = 2;
bit = TRACE_CTX_SOFTIRQ;
} else
bit = 3;
bit = TRACE_CTX_NORMAL;
return bit;
}
static __always_inline int trace_test_and_set_recursion(int start, int max)
static __always_inline int trace_test_and_set_recursion(int start)
{
unsigned int val = current->trace_recursion;
int bit;
/* A previous recursion check was made */
if ((val & TRACE_CONTEXT_MASK) > max)
return 0;
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
* It could be that preempt_count has not been updated during
* a switch between contexts. Allow for a single recursion.
*/
bit = TRACE_TRANSITION_BIT;
bit = start + TRACE_CTX_TRANSITION;
if (trace_recursion_test(bit))
return -1;
trace_recursion_set(bit);
barrier();
return bit + 1;
return bit;
}
/* Normal check passed, clear the transition to allow it again */
trace_recursion_clear(TRACE_TRANSITION_BIT);
val |= 1 << bit;
current->trace_recursion = val;
barrier();
return bit + 1;
return bit;
}
static __always_inline void trace_clear_recursion(int bit)
{
unsigned int val = current->trace_recursion;
if (!bit)
return;
bit--;
bit = 1 << bit;
val &= ~bit;


@ -138,7 +138,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
pc = preempt_count();
preempt_disable_notrace();
bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
if (bit < 0)
goto out;


@ -1455,7 +1455,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
}
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void **head, void **tail)
void **head, void **tail,
int *cnt)
{
void *object;
@ -1490,6 +1491,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
*head = object;
if (!*tail)
*tail = object;
} else {
/*
* Adjust the reconstructed freelist depth
* accordingly if object's reuse is delayed.
*/
--(*cnt);
}
} while (object != old_tail);
@ -3049,7 +3056,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
*/
if (slab_free_freelist_hook(s, &head, &tail))
if (slab_free_freelist_hook(s, &head, &tail, &cnt))
do_slab_free(s, page, head, tail, cnt, addr);
}
@ -3727,8 +3734,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
if (alloc_kmem_cache_cpus(s))
return 0;
free_kmem_cache_nodes(s);
error:
__kmem_cache_release(s);
return -EINVAL;
}


@ -326,6 +326,7 @@ int j1939_session_activate(struct j1939_session *session);
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
void j1939_session_timers_cancel(struct j1939_session *session);
#define J1939_MIN_TP_PACKET_SIZE 9
#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)


@ -255,11 +255,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
struct j1939_priv *priv, *priv_new;
int ret;
priv = j1939_priv_get_by_ndev(ndev);
spin_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) {
kref_get(&priv->rx_kref);
spin_unlock(&j1939_netdev_lock);
return priv;
}
spin_unlock(&j1939_netdev_lock);
priv = j1939_priv_create(ndev);
if (!priv)
@ -275,10 +278,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
/* Someone was faster than us, use their priv and roll
* back our's.
*/
kref_get(&priv_new->rx_kref);
spin_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
kref_get(&priv_new->rx_kref);
return priv_new;
}
j1939_priv_set(ndev, priv);


@ -1230,12 +1230,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
session->err = -ETIME;
j1939_session_deactivate(session);
} else {
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
__func__, session);
j1939_session_list_lock(session->priv);
if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_ACTIVE_MAX) {
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
__func__, session);
j1939_session_get(session);
hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
@ -1597,6 +1596,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
abort = J1939_XTP_ABORT_FAULT;
else if (len > priv->tp_max_packet_size)
abort = J1939_XTP_ABORT_RESOURCE;
else if (len < J1939_MIN_TP_PACKET_SIZE)
abort = J1939_XTP_ABORT_FAULT;
}
if (abort != J1939_XTP_NO_ABORT) {
@ -1771,6 +1772,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
static void j1939_xtp_rx_dat_one(struct j1939_session *session,
struct sk_buff *skb)
{
enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb;
struct sk_buff *se_skb = NULL;
@ -1785,9 +1787,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
skcb = j1939_skb_to_cb(skb);
dat = skb->data;
if (skb->len <= 1)
if (skb->len != 8) {
/* makes no sense */
abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
goto out_session_cancel;
}
switch (session->last_cmd) {
case 0xff:
@ -1885,7 +1889,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
out_session_cancel:
kfree_skb(se_skb);
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_cancel(session, abort);
j1939_session_put(session);
}


@ -94,7 +94,7 @@ config NF_CONNTRACK_MARK
config NF_CONNTRACK_SECMARK
bool 'Connection tracking security mark support'
depends on NETWORK_SECMARK
default m if NETFILTER_ADVANCED=n
default y if NETFILTER_ADVANCED=n
help
This option enables security markings to be applied to
connections. Typically they are copied to connections from


@ -4047,6 +4047,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
#ifdef CONFIG_IP_VS_DEBUG
/* Global sysctls must be ro in non-init netns */
if (!net_eq(net, &init_net))
tbl[idx++].mode = 0444;
#endif
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {


@ -277,6 +277,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
ndev->cur_conn_id);
if (conn_info) {
list_del(&conn_info->list);
if (conn_info == ndev->rf_conn_info)
ndev->rf_conn_info = NULL;
devm_kfree(&ndev->nfc_dev->dev, conn_info);
}
}


@ -476,6 +476,9 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
* necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) {
if (netif_is_bridge_master(lower_dev))
continue;
err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
check_cb, add_cb);
if (err && err != -EOPNOTSUPP)
@ -528,6 +531,9 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
* necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) {
if (netif_is_bridge_master(lower_dev))
continue;
err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
check_cb, del_cb);
if (err && err != -EOPNOTSUPP)
@ -579,6 +585,9 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
* necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) {
if (netif_is_bridge_master(lower_dev))
continue;
err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
check_cb, set_cb);
if (err && err != -EOPNOTSUPP)


@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
+= -fplugin-arg-structleak_plugin-byref
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
+= -fplugin-arg-structleak_plugin-byref-all
ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
endif
export DISABLE_STRUCTLEAK_PLUGIN
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
+= -DSTRUCTLEAK_PLUGIN


@ -395,8 +395,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
if (!full_reset)
goto skip_reset;
/* clear STATESTS */
snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
/* clear STATESTS if not in reset */
if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
/* reset controller */
snd_hdac_bus_enter_link_reset(bus);


@ -2537,6 +2537,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),


@ -752,9 +752,16 @@ static int wm8960_configure_clocking(struct snd_soc_component *component)
int i, j, k;
int ret;
if (!(iface1 & (1<<6))) {
dev_dbg(component->dev,
"Codec is slave mode, no need to configure clock\n");
/*
* For Slave mode clocking should still be configured,
* so this if statement should be removed, but some platform
* may not work if the sysclk is not configured, to avoid such
* compatible issue, just add '!wm8960->sysclk' condition in
* this if statement.
*/
if (!(iface1 & (1 << 6)) && !wm8960->sysclk) {
dev_warn(component->dev,
"slave mode, but proceeding with no clock configuration\n");
return 0;
}


@ -2546,6 +2546,7 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
const char *pin, int status)
{
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
int ret = 0;
dapm_assert_locked(dapm);
@ -2558,13 +2559,14 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
dapm_mark_dirty(w, "pin configuration");
dapm_widget_invalidate_input_paths(w);
dapm_widget_invalidate_output_paths(w);
ret = 1;
}
w->connected = status;
if (status == 0)
w->force = 0;
return 0;
return ret;
}
/**
@ -3580,14 +3582,15 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
const char *pin = (const char *)kcontrol->private_value;
int ret;
if (ucontrol->value.integer.value[0])
snd_soc_dapm_enable_pin(&card->dapm, pin);
ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
else
snd_soc_dapm_disable_pin(&card->dapm, pin);
ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
snd_soc_dapm_sync(&card->dapm);
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);
@ -4029,7 +4032,7 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
rtd->params_select = ucontrol->value.enumerated.item[0];
return 0;
return 1;
}
static void


@ -3806,5 +3806,37 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
}
}
},
{
/*
* Sennheiser GSP670
* Change order of interfaces loaded
*/
USB_DEVICE(0x1395, 0x0300),
.bInterfaceClass = USB_CLASS_PER_INTERFACE,
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
.ifnum = QUIRK_ANY_INTERFACE,
.type = QUIRK_COMPOSITE,
.data = &(const struct snd_usb_audio_quirk[]) {
// Communication
{
.ifnum = 3,
.type = QUIRK_AUDIO_STANDARD_INTERFACE
},
// Recording
{
.ifnum = 4,
.type = QUIRK_AUDIO_STANDARD_INTERFACE
},
// Main
{
.ifnum = 1,
.type = QUIRK_AUDIO_STANDARD_INTERFACE
},
{
.ifnum = -1
}
}
}
},
#undef USB_DEVICE_VENDOR_SPEC


@ -174,7 +174,6 @@ fi
ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null
if [ $? -ne 0 ];then
echo "ERROR: ns1 cannot reach ns2" 1>&2
bash
exit 1
fi