This is the 5.4.15 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl4tVVcACgkQONu9yGCS
 aT6IZQ/+J/hKVxK9S0E4nfHy8IC87wRnjmIBsjnZ8jx9+KAhYyHsL5iUL5U0YQPj
 O1ZYO2Yly8DzzU1RLwkMgZ+eGYBnNuSGtZN/v9IQrQYrV77F7fNM0S59f/ucQJLh
 lAMbaAbttR05bb48YieZm1HksoRsHmFEg0LsUbQqjm74CWJ+/JA+bZcdnTi9iiJm
 HELavBOM5NoO/g8Iuh0Xn5Y3W1mOTv3lG7Vn51TynUtJjlyJaaO9cVxDJzDBLabO
 SKYqH5X2yCBmKw3rH6F4KTDXAiM+v+EzvDwM12aEvG0TkkPEwNcFrkA4hgDFXUWi
 QEe24R/UP4J2W/jAH46VaeEELo0cNLzt0e9sVi6BsxtkTaf/KknxE93PSOyY40TF
 CM/nMJAlVv5KYmhQYPa9ZTEoUBNGcAVjsI2Pi7t86oLsFtaN6Sb1BvJTdHPwLA5Z
 OIi64ZBLy3jWHC4We3ajXI+PD6qlbzyTrjAE6Se5Zfmy05m936XNAfMup4mFMoBv
 MDEAG0f5XyyAXwARugq46xTlfjI1QO6XOnufxzFCaFETbtr+yYvmdmzWE1I+qyst
 Xugd94gchuWVH62YPbf+r9H2FpoHZjAroQHTV3hJ+pt/tJqYCcvISG2uv2pJePvm
 oRt/DO9CA2N5ls0z7WC55Kk746E5NSgsLmF4nktphnshqZR5VFs=
 =iz+j
 -----END PGP SIGNATURE-----

Merge 5.4.15 into android-5.4

Changes in 5.4.15
	drm/i915: Fix pid leak with banned clients
	libbpf: Fix compatibility for kernels without need_wakeup
	libbpf: Fix memory leak/double free issue
	libbpf: Fix potential overflow issue
	libbpf: Fix another potential overflow issue in bpf_prog_linfo
	libbpf: Make btf__resolve_size logic always check size error condition
	bpf: Force .BTF section start to zero when dumping from vmlinux
	samples: bpf: update map definition to new syntax BTF-defined map
	samples/bpf: Fix broken xdp_rxq_info due to map order assumptions
	ARM: dts: logicpd-torpedo-37xx-devkit-28: Reference new DRM panel
	ARM: OMAP2+: Add missing put_device() call in omapdss_init_of()
	xfs: Sanity check flags of Q_XQUOTARM call
	i2c: stm32f7: rework slave_id allocation
	i2c: i2c-stm32f7: fix 10-bits check in slave free id search loop
	mfd: intel-lpss: Add default I2C device properties for Gemini Lake
	SUNRPC: Fix svcauth_gss_proxy_init()
	SUNRPC: Fix backchannel latency metrics
	powerpc/security: Fix debugfs data leak on 32-bit
	powerpc/pseries: Enable support for ibm,drc-info property
	powerpc/kasan: Fix boot failure with RELOCATABLE && FSL_BOOKE
	powerpc/archrandom: fix arch_get_random_seed_int()
	tipc: reduce sensitive to retransmit failures
	tipc: update mon's self addr when node addr generated
	tipc: fix potential memory leak in __tipc_sendmsg()
	tipc: fix wrong socket reference counter after tipc_sk_timeout() returns
	tipc: fix wrong timeout input for tipc_wait_for_cond()
	net/mlx5e: Fix free peer_flow when refcount is 0
	phy: lantiq: vrx200-pcie: fix error return code in ltq_vrx200_pcie_phy_power_on()
	net: phy: broadcom: Fix RGMII delays configuration for BCM54210E
	phy: ti: gmii-sel: fix mac tx internal delay for rgmii-rxid
	mt76: mt76u: fix endpoint definition order
	mt7601u: fix bbp version check in mt7601u_wait_bbp_ready
	ice: fix stack leakage
	s390/pkey: fix memory leak within _copy_apqns_from_user()
	nfsd: depend on CRYPTO_MD5 for legacy client tracking
	crypto: amcc - restore CRYPTO_AES dependency
	crypto: sun4i-ss - fix big endian issues
	perf map: No need to adjust the long name of modules
	leds: tlc591xx: update the maximum brightness
	soc/tegra: pmc: Fix crashes for hierarchical interrupts
	soc: qcom: llcc: Name regmaps to avoid collisions
	soc: renesas: Add missing check for non-zero product register address
	soc: aspeed: Fix snoop_file_poll()'s return type
	watchdog: sprd: Fix the incorrect pointer getting from driver data
	ipmi: Fix memory leak in __ipmi_bmc_register
	sched/core: Further clarify sched_class::set_next_task()
	gpiolib: No need to call gpiochip_remove_pin_ranges() twice
	rtw88: fix beaconing mode rsvd_page memory violation issue
	rtw88: fix error handling when setup efuse info
	drm/panfrost: Add missing check for pfdev->regulator
	drm: panel-lvds: Potential Oops in probe error handling
	drm/amdgpu: remove excess function parameter description
	hwrng: omap3-rom - Fix missing clock by probing with device tree
	dpaa2-eth: Fix minor bug in ethtool stats reporting
	drm/rockchip: Round up _before_ giving to the clock framework
	software node: Get reference to parent swnode in get_parent op
	PCI: mobiveil: Fix csr_read()/write() build issue
	drm: rcar_lvds: Fix color mismatches on R-Car H2 ES2.0 and later
	net: netsec: Correct dma sync for XDP_TX frames
	ACPI: platform: Unregister stale platform devices
	pwm: sun4i: Fix incorrect calculation of duty_cycle/period
	regulator: bd70528: Add MODULE_ALIAS to allow module auto loading
	drm/amdgpu/vi: silence an uninitialized variable warning
	power: supply: bd70528: Add MODULE_ALIAS to allow module auto loading
	firmware: imx: Remove call to devm_of_platform_populate
	libbpf: Don't use kernel-side u32 type in xsk.c
	rcu: Fix uninitialized variable in nocb_gp_wait()
	dpaa_eth: perform DMA unmapping before read
	dpaa_eth: avoid timestamp read on error paths
	scsi: ufs: delete redundant function ufshcd_def_desc_sizes()
	net: openvswitch: don't unlock mutex when changing the user_features fails
	hv_netvsc: flag software created hash value
	rt2800: remove errornous duplicate condition
	net: neigh: use long type to store jiffies delta
	net: axienet: Fix error return code in axienet_probe()
	selftests: gen_kselftest_tar.sh: Do not clobber kselftest/
	rtc: bd70528: fix module alias to autoload module
	packet: fix data-race in fanout_flow_is_huge()
	i2c: stm32f7: report dma error during probe
	kselftests: cgroup: Avoid the reuse of fd after it is deallocated
	firmware: arm_scmi: Fix doorbell ring logic for !CONFIG_64BIT
	mmc: sdio: fix wl1251 vendor id
	mmc: core: fix wl1251 sdio quirks
	tee: optee: Fix dynamic shm pool allocations
	tee: optee: fix device enumeration error handling
	workqueue: Add RCU annotation for pwq list walk
	SUNRPC: Fix another issue with MIC buffer space
	sched/cpufreq: Move the cfs_rq_util_change() call to cpufreq_update_util()
	mt76: mt76u: rely on usb_interface instead of usb_dev
	dma-direct: don't check swiotlb=force in dma_direct_map_resource
	afs: Remove set but not used variables 'before', 'after'
	dmaengine: ti: edma: fix missed failure handling
	drm/radeon: fix bad DMA from INTERRUPT_CNTL2
	xdp: Fix cleanup on map free for devmap_hash map type
	platform/chrome: wilco_ec: fix use after free issue
	block: fix memleak of bio integrity data
	s390/qeth: fix dangling IO buffers after halt/clear
	net-sysfs: Call dev_hold always in netdev_queue_add_kobject
	gpio: aspeed: avoid return type warning
	phy/rockchip: inno-hdmi: round clock rate down to closest 1000 Hz
	optee: Fix multi page dynamic shm pool alloc
	Linux 5.4.15

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I28b2a19657d40804406dc0e7c266296ce8768eb7

@@ -0,0 +1,27 @@
OMAP ROM RNG driver binding

Secure SoCs may provide RNG via secure ROM calls like Nokia N900 does. The
implementation can depend on the SoC secure ROM used.

- compatible:
	Usage: required
	Value type: <string>
	Definition: must be "nokia,n900-rom-rng"

- clocks:
	Usage: required
	Value type: <prop-encoded-array>
	Definition: reference to the RNG interface clock

- clock-names:
	Usage: required
	Value type: <stringlist>
	Definition: must be "ick"

Example:

	rom_rng: rng {
		compatible = "nokia,n900-rom-rng";
		clocks = <&rng_ick>;
		clock-names = "ick";
	};

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 14
+SUBLEVEL = 15
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@@ -11,22 +11,6 @@
#include "logicpd-torpedo-37xx-devkit.dts"
&lcd0 {
label = "28";
panel-timing {
clock-frequency = <9000000>;
hactive = <480>;
vactive = <272>;
hfront-porch = <3>;
hback-porch = <2>;
hsync-len = <42>;
vback-porch = <3>;
vfront-porch = <2>;
vsync-len = <11>;
hsync-active = <1>;
vsync-active = <1>;
de-active = <1>;
pixelclk-active = <0>;
};
/* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
compatible = "logicpd,type28";
};

@@ -155,6 +155,12 @@
pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
};
rom_rng: rng {
compatible = "nokia,n900-rom-rng";
clocks = <&rng_ick>;
clock-names = "ick";
};
/* controlled (enabled/disabled) directly by bcm2048 and wl1251 */
vctcxo: vctcxo {
compatible = "fixed-clock";

@@ -265,6 +265,7 @@ static int __init omapdss_init_of(void)
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (r) {
pr_err("Unable to populate DSS submodule devices\n");
put_device(&pdev->dev);
return r;
}

@@ -268,14 +268,6 @@ static void __init am3517_evm_legacy_init(void)
am35xx_emac_reset();
}
static struct platform_device omap3_rom_rng_device = {
.name = "omap3-rom-rng",
.id = -1,
.dev = {
.platform_data = rx51_secure_rng_call,
},
};
static void __init nokia_n900_legacy_init(void)
{
hsmmc2_internal_input_clk();
@@ -291,9 +283,6 @@ static void __init nokia_n900_legacy_init(void)
pr_warn("RX-51: Not enabling ARM errata 430973 workaround\n");
pr_warn("Thumb binaries may crash randomly without this workaround\n");
}
pr_info("RX-51: Registering OMAP3 HWRNG device\n");
platform_device_register(&omap3_rom_rng_device);
}
}
@@ -538,6 +527,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
&am35xx_emac_pdata),
OF_DEV_AUXDATA("nokia,n900-rom-rng", 0, NULL, rx51_secure_rng_call),
/* McBSP modules with sidetone core */
#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata),

@@ -28,7 +28,7 @@ static inline int arch_get_random_seed_int(unsigned int *v)
unsigned long val;
int rc;
-rc = arch_get_random_long(&val);
+rc = arch_get_random_seed_long(&val);
if (rc)
*v = val;

@@ -9,7 +9,7 @@
#define _ASM_POWERPC_SECURITY_FEATURES_H
-extern unsigned long powerpc_security_features;
+extern u64 powerpc_security_features;
extern bool rfi_flush;
/* These are bit flags */
@@ -24,17 +24,17 @@ void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);
void setup_count_cache_flush(void);
-static inline void security_ftr_set(unsigned long feature)
+static inline void security_ftr_set(u64 feature)
{
powerpc_security_features |= feature;
}
-static inline void security_ftr_clear(unsigned long feature)
+static inline void security_ftr_clear(u64 feature)
{
powerpc_security_features &= ~feature;
}
-static inline bool security_ftr_enabled(unsigned long feature)
+static inline bool security_ftr_enabled(u64 feature)
{
return !!(powerpc_security_features & feature);
}

@@ -238,6 +238,9 @@ set_ivor:
bl early_init
#ifdef CONFIG_KASAN
bl kasan_early_init
#endif
#ifdef CONFIG_RELOCATABLE
mr r3,r30
mr r4,r31
@@ -264,9 +267,6 @@ set_ivor:
/*
* Decide what sort of machine this is and initialize the MMU.
*/
#ifdef CONFIG_KASAN
bl kasan_early_init
#endif
mr r3,r30
mr r4,r31
bl machine_init

@@ -1053,7 +1053,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.reserved2 = 0,
.reserved3 = 0,
.subprocessors = 1,
-.byte22 = OV5_FEAT(OV5_DRMEM_V2),
+.byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
.intarch = 0,
.mmu = 0,
.hash_ext = 0,

@@ -16,7 +16,7 @@
#include <asm/setup.h>
-unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
enum count_cache_flush_type {
COUNT_CACHE_FLUSH_NONE = 0x1,
@@ -109,7 +109,7 @@ device_initcall(barrier_nospec_debugfs_init);
static __init int security_feature_debugfs_init(void)
{
debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
-(u64 *)&powerpc_security_features);
+&powerpc_security_features);
return 0;
}
device_initcall(security_feature_debugfs_init);

@@ -87,7 +87,7 @@ EXPORT_SYMBOL(bio_integrity_alloc);
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
-static void bio_integrity_free(struct bio *bio)
+void bio_integrity_free(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_set *bs = bio->bi_pool;

@@ -236,6 +236,9 @@ void bio_uninit(struct bio *bio)
bio_disassociate_blkg(bio);
bio_crypt_free_ctx(bio);
if (bio_integrity(bio))
bio_integrity_free(bio);
}
EXPORT_SYMBOL(bio_uninit);

@@ -122,6 +122,7 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
if (bio_integrity(bio))
@@ -167,6 +168,9 @@ static inline bool bio_integrity_endio(struct bio *bio)
{
return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
unsigned long blk_rq_timeout(unsigned long timeout);

@@ -31,6 +31,44 @@ static const struct acpi_device_id forbidden_id_list[] = {
{"", 0},
};
static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
{
struct device *dev;
dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev);
return dev ? to_platform_device(dev) : NULL;
}
static int acpi_platform_device_remove_notify(struct notifier_block *nb,
unsigned long value, void *arg)
{
struct acpi_device *adev = arg;
struct platform_device *pdev;
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
/* Nothing to do here */
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
break;
pdev = acpi_platform_device_find_by_companion(adev);
if (!pdev)
break;
platform_device_unregister(pdev);
put_device(&pdev->dev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block acpi_platform_notifier = {
.notifier_call = acpi_platform_device_remove_notify,
};
static void acpi_platform_fill_resource(struct acpi_device *adev,
const struct resource *src, struct resource *dest)
{
@@ -130,3 +168,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
return pdev;
}
EXPORT_SYMBOL_GPL(acpi_create_platform_device);
void __init acpi_platform_init(void)
{
acpi_reconfig_notifier_register(&acpi_platform_notifier);
}

@@ -2174,6 +2174,7 @@ int __init acpi_scan_init(void)
acpi_pci_root_init();
acpi_pci_link_init();
acpi_processor_init();
acpi_platform_init();
acpi_lpss_init();
acpi_apd_init();
acpi_cmos_rtc_init();

@@ -520,7 +520,10 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
-return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
+if (!swnode || !swnode->parent)
+return NULL;
+return fwnode_handle_get(&swnode->parent->fwnode);
}
static struct fwnode_handle *

@@ -20,6 +20,8 @@
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#define RNG_RESET 0x01
@@ -86,14 +88,18 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
static struct hwrng omap3_rom_rng_ops = {
.name = "omap3-rom",
.read = omap3_rom_rng_read,
};
static int omap3_rom_rng_probe(struct platform_device *pdev)
{
int ret = 0;
pr_info("initializing\n");
omap3_rom_rng_ops.read = of_device_get_match_data(&pdev->dev);
if (!omap3_rom_rng_ops.read) {
dev_err(&pdev->dev, "missing rom code handler\n");
return -ENODEV;
}
omap3_rom_rng_call = pdev->dev.platform_data;
if (!omap3_rom_rng_call) {
@@ -126,9 +132,16 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
return 0;
}
static const struct of_device_id omap_rom_rng_match[] = {
{ .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, omap_rom_rng_match);
static struct platform_driver omap3_rom_rng_driver = {
.driver = {
.name = "omap3-rom-rng",
.of_match_table = omap_rom_rng_match,
},
.probe = omap3_rom_rng_probe,
.remove = omap3_rom_rng_remove,

@@ -3039,8 +3039,11 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bmc->pdev.name = "ipmi_bmc";
rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
-if (rv < 0)
+if (rv < 0) {
+kfree(bmc);
goto out;
+}
bmc->pdev.dev.driver = &ipmidriver.driver;
bmc->pdev.id = rv;
bmc->pdev.dev.release = release_bmc_device;

@@ -333,6 +333,7 @@ config CRYPTO_DEV_PPC4XX
depends on PPC && 4xx
select CRYPTO_HASH
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_LIB_AES
select CRYPTO_CCM
select CRYPTO_CTR

@@ -175,7 +175,7 @@ static int sun4i_hash(struct ahash_request *areq)
*/
unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
unsigned int in_i = 0;
-u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0;
+u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
struct sun4i_req_ctx *op = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -184,6 +184,7 @@ static int sun4i_hash(struct ahash_request *areq)
struct sg_mapping_iter mi;
int in_r, err = 0;
size_t copied = 0;
__le32 wb = 0;
dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
__func__, crypto_tfm_alg_name(areq->base.tfm),
@@ -395,7 +396,7 @@ hash_final:
nbw = op->len - 4 * nwait;
if (nbw) {
-wb = *(u32 *)(op->buf + nwait * 4);
+wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
wb &= GENMASK((nbw * 8) - 1, 0);
op->byte_count += nbw;
@@ -404,7 +405,7 @@ hash_final:
/* write the remaining bytes of the nbw buffer */
wb |= ((1 << 7) << (nbw * 8));
-bf[j++] = wb;
+bf[j++] = le32_to_cpu(wb);
/*
* number of space to pad to obtain 64o minus 8(size) minus 4 (final 1)
@@ -423,13 +424,13 @@ hash_final:
/* write the length of data */
if (op->mode == SS_OP_SHA1) {
-__be64 bits = cpu_to_be64(op->byte_count << 3);
-bf[j++] = lower_32_bits(bits);
-bf[j++] = upper_32_bits(bits);
+__be64 *bits = (__be64 *)&bf[j];
+*bits = cpu_to_be64(op->byte_count << 3);
+j += 2;
} else {
-__le64 bits = op->byte_count << 3;
-bf[j++] = lower_32_bits(bits);
-bf[j++] = upper_32_bits(bits);
+__le64 *bits = (__le64 *)&bf[j];
+*bits = cpu_to_le64(op->byte_count << 3);
+j += 2;
}
writesl(ss->base + SS_RXFIFO, bf, j);
@@ -471,7 +472,7 @@ hash_final:
}
} else {
for (i = 0; i < 4; i++) {
-v = readl(ss->base + SS_MD0 + i * 4);
+v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
}

@@ -2403,8 +2403,10 @@ static int edma_probe(struct platform_device *pdev)
ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
sizeof(*ecc->tc_list), GFP_KERNEL);
-if (!ecc->tc_list)
-return -ENOMEM;
+if (!ecc->tc_list) {
+ret = -ENOMEM;
+goto err_reg1;
+}
for (i = 0;; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",

@@ -323,7 +323,7 @@ static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
if (db->mask)
val = ioread64_hi_lo(db->addr) & db->mask;
-iowrite64_hi_lo(db->set, db->addr);
+iowrite64_hi_lo(db->set | val, db->addr);
}
#endif
}

@@ -114,7 +114,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
dev_info(dev, "NXP i.MX DSP IPC initialized\n");
-return devm_of_platform_populate(dev);
+return 0;
out:
kfree(chan_name);
for (j = 0; j < i; j++) {

@@ -909,16 +909,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
ret = of_gpiochip_scan_gpios(chip);
-if (ret) {
+if (ret)
of_node_put(chip->of_node);
-gpiochip_remove_pin_ranges(chip);
-}
return ret;
}
void of_gpiochip_remove(struct gpio_chip *chip)
{
gpiochip_remove_pin_ranges(chip);
of_node_put(chip->of_node);
}

@@ -1452,6 +1452,7 @@ err_remove_of_chip:
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
err_free_gpiochip_mask:
gpiochip_remove_pin_ranges(chip);
gpiochip_free_valid_mask(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
@@ -1507,8 +1508,8 @@ void gpiochip_remove(struct gpio_chip *chip)
gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
acpi_gpiochip_remove(chip);
-gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+gpiochip_remove_pin_ranges(chip);
gpiochip_free_valid_mask(chip);
/*
* We accept no more calls into the driver from this point, so

@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
default:
/* acturally if code runs to here, it's an error case */
-BUG_ON(1);
+BUG();
}
}

@@ -423,7 +423,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
* @adev: amdgpu_device pointer
-* @sw: enable SW clock gating
*
* Disable clock gating for VCN block
*/
@@ -542,7 +541,6 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
-* @sw: enable SW clock gating
*
* Enable clock gating for VCN block
*/

@@ -1421,6 +1421,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
*cap = false;
if (!hwmgr)
return -EINVAL;

@@ -2094,8 +2094,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
ext_data.fpriv = file->driver_priv;
if (client_is_banned(ext_data.fpriv)) {
DRM_DEBUG("client %s[%d] banned from creating ctx\n",
-current->comm,
-pid_nr(get_task_pid(current, PIDTYPE_PID)));
+current->comm, task_pid_nr(current));
return -EIO;
}

@@ -197,7 +197,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
static int panel_lvds_probe(struct platform_device *pdev)
{
struct panel_lvds *lvds;
-struct device_node *np;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
@@ -243,14 +242,9 @@ static int panel_lvds_probe(struct platform_device *pdev)
return ret;
}
-np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
-if (np) {
-lvds->backlight = of_find_backlight_by_node(np);
-of_node_put(np);
-if (!lvds->backlight)
-return -EPROBE_DEFER;
-}
+lvds->backlight = devm_of_find_backlight(lvds->dev);
+if (IS_ERR(lvds->backlight))
+return PTR_ERR(lvds->backlight);
/*
* TODO: Handle all power supplies specified in the DT node in a generic
@@ -266,14 +260,10 @@ static int panel_lvds_probe(struct platform_device *pdev)
ret = drm_panel_add(&lvds->panel);
if (ret < 0)
-goto error;
+return ret;
dev_set_drvdata(lvds->dev, lvds);
return 0;
-error:
-put_device(&lvds->backlight->dev);
-return ret;
}
static int panel_lvds_remove(struct platform_device *pdev)
@@ -284,9 +274,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
panel_lvds_disable(&lvds->panel);
-if (lvds->backlight)
-put_device(&lvds->backlight->dev);
return 0;
}

@@ -53,8 +53,10 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
if (err) {
dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
err);
-regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
-pfdev->devfreq.cur_volt);
+if (pfdev->regulator)
+regulator_set_voltage(pfdev->regulator,
+pfdev->devfreq.cur_volt,
+pfdev->devfreq.cur_volt);
return err;
}

@@ -6969,8 +6969,8 @@ static int cik_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
/* XXX this should actually be a bus address, not an MC address. same on older asics */
-WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+/* set dummy read address to dummy page address */
+WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
-/* set dummy read address to ring address */
-WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+/* set dummy read address to dummy page address */
+WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
-/* set dummy read address to ring address */
-WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+/* set dummy read address to dummy page address */
+WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -16,6 +16,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
return 0;
}
static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
.gen = 2,
.quirks = RCAR_LVDS_QUIRK_LANES,
.pll_setup = rcar_lvds_pll_setup_gen2,
};
static const struct soc_device_attribute lvds_quirk_matches[] = {
{
.soc_id = "r8a7790", .revision = "ES1.*",
.data = &rcar_lvds_r8a7790es1_info,
},
{ /* sentinel */ }
};
static int rcar_lvds_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
struct resource *mem;
int ret;
@@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
attr = soc_device_match(lvds_quirk_matches);
if (attr)
lvds->info = attr->data;
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
return ret;
@@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
.pll_setup = rcar_lvds_pll_setup_gen2,
};
-static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
-.gen = 2,
-.quirks = RCAR_LVDS_QUIRK_LANES,
-.pll_setup = rcar_lvds_pll_setup_gen2,
-};
static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD,
@@ -930,7 +944,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
{ .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
{ .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },

@@ -1040,10 +1040,41 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct vop *vop = to_vop(crtc);
+unsigned long rate;
-adjusted_mode->clock =
-DIV_ROUND_UP(clk_round_rate(vop->dclk,
-adjusted_mode->clock * 1000), 1000);
/*
* Clock craziness.
*
* Key points:
*
* - DRM works in in kHz.
* - Clock framework works in Hz.
* - Rockchip's clock driver picks the clock rate that is the
* same _OR LOWER_ than the one requested.
*
* Action plan:
*
* 1. When DRM gives us a mode, we should add 999 Hz to it. That way
* if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
* make 60000 kHz then the clock framework will actually give us
* the right clock.
*
* NOTE: if the PLL (maybe through a divider) could actually make
* a clock rate 999 Hz higher instead of the one we want then this
* could be a problem. Unfortunately there's not much we can do
* since it's baked into DRM to use kHz. It shouldn't matter in
* practice since Rockchip PLLs are controlled by tables and
* even if there is a divider in the middle I wouldn't expect PLL
* rates in the table that are just a few kHz different.
*
* 2. Get the clock framework to round the rate for us to tell us
* what it will actually make.
*
* 3. Store the rounded up rate so that we don't need to worry about
* this in the actual clk_set_rate().
*/
+rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
}

@@ -20,13 +20,13 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
-return NULL;
+return ERR_PTR(-ENOMEM);
/* Request and configure I2C TX dma channel */
-dma->chan_tx = dma_request_slave_channel(dev, "tx");
-if (!dma->chan_tx) {
+dma->chan_tx = dma_request_chan(dev, "tx");
+if (IS_ERR(dma->chan_tx)) {
dev_dbg(dev, "can't request DMA tx channel\n");
-ret = -EINVAL;
+ret = PTR_ERR(dma->chan_tx);
goto fail_al;
}
@@ -42,10 +42,10 @@
}
/* Request and configure I2C RX dma channel */
-dma->chan_rx = dma_request_slave_channel(dev, "rx");
-if (!dma->chan_rx) {
+dma->chan_rx = dma_request_chan(dev, "rx");
+if (IS_ERR(dma->chan_rx)) {
dev_err(dev, "can't request DMA rx channel\n");
-ret = -EINVAL;
+ret = PTR_ERR(dma->chan_rx);
goto fail_tx;
}
@@ -75,7 +75,7 @@ fail_al:
devm_kfree(dev, dma);
dev_info(dev, "can't use DMA\n");
-return NULL;
+return ERR_PTR(ret);
}
void stm32_i2c_dma_free(struct stm32_i2c_dma *dma)

@@ -1267,8 +1267,8 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev,
* slave[0] supports 7-bit and 10-bit slave address
* slave[1] supports 7-bit slave address only
*/
-for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) {
-if (i == 1 && (slave->flags & I2C_CLIENT_PEC))
+for (i = STM32F7_I2C_MAX_SLAVE - 1; i >= 0; i--) {
+if (i == 1 && (slave->flags & I2C_CLIENT_TEN))
continue;
if (!i2c_dev->slave[i]) {
*id = i;
@@ -1955,6 +1955,15 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr,
STM32F7_I2C_TXDR,
STM32F7_I2C_RXDR);
if (PTR_ERR(i2c_dev->dma) == -ENODEV)
i2c_dev->dma = NULL;
else if (IS_ERR(i2c_dev->dma)) {
ret = PTR_ERR(i2c_dev->dma);
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"Failed to request dma error %i\n", ret);
goto clk_free;
}
platform_set_drvdata(pdev, i2c_dev);

@@ -13,6 +13,7 @@
#include <linux/slab.h>
#define TLC591XX_MAX_LEDS 16
#define TLC591XX_MAX_BRIGHTNESS 256
#define TLC591XX_REG_MODE1 0x00
#define MODE1_RESPON_ADDR_MASK 0xF0
@@ -112,11 +113,11 @@ tlc591xx_brightness_set(struct led_classdev *led_cdev,
struct tlc591xx_priv *priv = led->priv;
int err;
-switch (brightness) {
+switch ((int)brightness) {
case 0:
err = tlc591xx_set_ledout(priv, led, LEDOUT_OFF);
break;
-case LED_FULL:
+case TLC591XX_MAX_BRIGHTNESS:
err = tlc591xx_set_ledout(priv, led, LEDOUT_ON);
break;
default:
@@ -157,7 +158,7 @@ tlc591xx_configure(struct device *dev,
led->priv = priv;
led->led_no = i;
led->ldev.brightness_set_blocking = tlc591xx_brightness_set;
-led->ldev.max_brightness = LED_FULL;
+led->ldev.max_brightness = TLC591XX_MAX_BRIGHTNESS;
err = led_classdev_register(dev, &led->ldev);
if (err < 0) {
dev_err(dev, "couldn't register LED %s\n",

@@ -122,6 +122,18 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
.properties = apl_i2c_properties,
};
static struct property_entry glk_i2c_properties[] = {
PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 313),
PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 290),
{ },
};
static const struct intel_lpss_platform_info glk_i2c_info = {
.clk_rate = 133000000,
.properties = glk_i2c_properties,
};
static const struct intel_lpss_platform_info cnl_i2c_info = {
.clk_rate = 216000000,
.properties = spt_i2c_properties,
@@ -174,14 +186,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
/* GLK */
-{ PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info },
-{ PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info },
-{ PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&bxt_i2c_info },
-{ PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&bxt_i2c_info },
-{ PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&bxt_i2c_info },
-{ PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info },
-{ PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info },
-{ PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&glk_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&glk_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&glk_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&glk_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&glk_i2c_info },
+{ PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&glk_i2c_info },
{ PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info },

@@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
END_FIXUP
};
static const struct mmc_fixup sdio_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
add_quirk, MMC_QUIRK_DISABLE_CD),
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),

@@ -1600,13 +1600,15 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
* Skb freeing is not handled here.
*
* This function may be called on error paths in the Tx function, so guard
-* against cases when not all fd relevant fields were filled in.
+* against cases when not all fd relevant fields were filled in. To avoid
+* reading the invalid transmission timestamp for the error paths set ts to
+* false.
*
* Return the skb backpointer, since for S/G frames the buffer containing it
* gets freed here.
*/
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
-const struct qm_fd *fd)
+const struct qm_fd *fd, bool ts)
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
@@ -1620,18 +1622,6 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
skbh = (struct sk_buff **)phys_to_virt(addr);
skb = *skbh;
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
&ns)) {
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
} else {
dev_warn(dev, "fman_port_get_tstamp failed!\n");
}
}
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
nr_frags = skb_shinfo(skb)->nr_frags;
dma_unmap_single(dev, addr,
@@ -1654,14 +1644,29 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
qm_sg_entry_get_len(&sgt[i]), dma_dir);
}
/* Free the page frag that we allocated on Tx */
skb_free_frag(phys_to_virt(addr));
} else {
dma_unmap_single(dev, addr,
skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
}
/* DMA unmapping is required before accessing the HW provided info */
if (ts && priv->tx_tstamp &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
&ns)) {
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
} else {
dev_warn(dev, "fman_port_get_tstamp failed!\n");
}
}
if (qm_fd_get_format(fd) == qm_fd_sg)
/* Free the page frag that we allocated on Tx */
skb_free_frag(phys_to_virt(addr));
return skb;
}
@@ -2114,7 +2119,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
-dpaa_cleanup_tx_fd(priv, &fd);
+dpaa_cleanup_tx_fd(priv, &fd, false);
skb_to_fd_failed:
enomem:
percpu_stats->tx_errors++;
@@ -2160,7 +2165,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
percpu_priv->stats.tx_errors++;
-skb = dpaa_cleanup_tx_fd(priv, fd);
+skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb(skb);
}
@@ -2200,7 +2205,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
percpu_priv->tx_confirm++;
-skb = dpaa_cleanup_tx_fd(priv, fd);
+skb = dpaa_cleanup_tx_fd(priv, fd, true);
consume_skb(skb);
}
@@ -2430,7 +2435,7 @@ static void egress_ern(struct qman_portal *portal,
percpu_priv->stats.tx_fifo_errors++;
count_ern(percpu_priv, msg);
-skb = dpaa_cleanup_tx_fd(priv, fd);
+skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb_any(skb);
}

@@ -216,7 +216,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
if (err == -EINVAL)
/* Older firmware versions don't support all pages */
memset(&dpni_stats, 0, sizeof(dpni_stats));
-else
+else if (err)
netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
num_cnt = dpni_stats_page_size[j] / sizeof(u64);

@@ -1873,8 +1873,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
+struct ice_eth_stats stats = { 0 };
struct ice_pf *pf = vf->pf;
-struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -1893,7 +1893,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
-memset(&stats, 0, sizeof(struct ice_eth_stats));
ice_update_eth_stats(vsi);
stats = vsi->eth_stats;

@@ -1615,8 +1615,11 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
flow_flag_clear(flow, DUP);
-mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
-kfree(flow->peer_flow);
+if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
+mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
+kfree(flow->peer_flow);
+}
flow->peer_flow = NULL;
}

@@ -847,8 +847,8 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
enum dma_data_direction dma_dir =
page_pool_get_dma_dir(rx_ring->page_pool);
-dma_handle = page_pool_get_dma_addr(page) +
-NETSEC_RXBUF_HEADROOM;
+dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
+sizeof(*xdpf);
dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
dma_dir);
tx_desc.buf_type = TYPE_NETSEC_XDP_TX;

@@ -1790,10 +1790,6 @@ static int axienet_probe(struct platform_device *pdev)
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
IORESOURCE_MEM, 1);
-if (!res) {
-dev_err(&pdev->dev, "unable to get DMA memory resource\n");
-goto free_netdev;
-}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);

@@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash(
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
else
-hash = 0;
+return 0;
-skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+__skb_set_sw_hash(skb, hash, false);
}
return hash;
@@ -795,8 +795,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->protocol == htons(ETH_P_IP))
netvsc_comp_ipcsum(skb);
-/* Do L4 checksum offload if enabled and present.
-*/
+/* Do L4 checksum offload if enabled and present. */
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)

@@ -26,18 +26,13 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
static int bcm54xx_config_clock_delay(struct phy_device *phydev);
static int bcm54210e_config_init(struct phy_device *phydev)
{
int val;
-val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
-val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
-val |= MII_BCM54XX_AUXCTL_MISC_WREN;
-bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
-val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
-val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
-bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+bcm54xx_config_clock_delay(phydev);
if (phydev->dev_flags & PHY_BRCM_EN_MASTER_MODE) {
val = phy_read(phydev, MII_CTRL1000);

@@ -367,8 +367,8 @@ enum mt76u_in_ep {
enum mt76u_out_ep {
MT_EP_OUT_INBAND_CMD,
-MT_EP_OUT_AC_BK,
MT_EP_OUT_AC_BE,
+MT_EP_OUT_AC_BK,
MT_EP_OUT_AC_VI,
MT_EP_OUT_AC_VO,
MT_EP_OUT_HCCA,
@@ -799,7 +799,8 @@ static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout)
{
-struct usb_device *udev = to_usb_device(dev->dev);
+struct usb_interface *uintf = to_usb_interface(dev->dev);
+struct usb_device *udev = interface_to_usbdev(uintf);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;

@@ -226,7 +226,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
u32 mac_rev;
int ret;
-mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
+mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;

@@ -39,7 +39,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
struct mt76_dev *mdev;
int err;
-mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops,
+mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;

@@ -20,7 +20,8 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len)
{
-struct usb_device *udev = to_usb_device(dev->dev);
+struct usb_interface *uintf = to_usb_interface(dev->dev);
+struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
int i, ret;
@@ -235,7 +236,8 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
static bool mt76u_check_sg(struct mt76_dev *dev)
{
-struct usb_device *udev = to_usb_device(dev->dev);
+struct usb_interface *uintf = to_usb_interface(dev->dev);
+struct usb_device *udev = interface_to_usbdev(uintf);
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
(udev->bus->no_sg_constraint ||
@@ -370,7 +372,8 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
struct urb *urb, usb_complete_t complete_fn,
void *context)
{
-struct usb_device *udev = to_usb_device(dev->dev);
+struct usb_interface *uintf = to_usb_interface(dev->dev);
+struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
if (dir == USB_DIR_IN)
@@ -952,6 +955,7 @@ int mt76u_init(struct mt76_dev *dev,
.rd_rp = mt76u_rd_rp,
.type = MT76_BUS_USB,
};
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
@@ -965,6 +969,8 @@ int mt76u_init(struct mt76_dev *dev,
dev->bus = &mt76u_ops;
dev->queue_ops = &usb_queue_ops;
dev_set_drvdata(&udev->dev, dev);
usb->sg_en = mt76u_check_sg(dev);
return mt76u_set_endpoints(intf, usb);

@@ -213,7 +213,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
do {
val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
-if (val && ~val)
+if (val && val != 0xff)
break;
} while (--i);

@@ -5839,8 +5839,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21);
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
-rt2x00_rt(rt2x00dev, RT5392) ||
-rt2x00_rt(rt2x00dev, RT6352)) {
+rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -5854,8 +5853,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);

@@ -498,9 +498,6 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
{
struct sk_buff *skb = rsvd_pkt->skb;
-if (rsvd_pkt->add_txdesc)
-rtw_fill_rsvd_page_desc(rtwdev, skb);
if (page >= 1)
memcpy(buf + page_margin + page_size * (page - 1),
skb->data, skb->len);
@@ -625,16 +622,37 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
if (!iter) {
rtw_err(rtwdev, "fail to build rsvd packet\n");
rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
}
/* Fill the tx_desc for the rsvd pkt that requires one.
* And iter->len will be added with size of tx_desc_sz.
*/
if (rsvd_pkt->add_txdesc)
rtw_fill_rsvd_page_desc(rtwdev, iter);
rsvd_pkt->skb = iter;
rsvd_pkt->page = total_page;
if (rsvd_pkt->add_txdesc)
/* Reserved page is downloaded via TX path, and TX path will
* generate a tx_desc at the header to describe length of
* the buffer. If we are not counting page numbers with the
* size of tx_desc added at the first rsvd_pkt (usually a
* beacon, firmware default refer to the first page as the
* content of beacon), we could generate a buffer which size
* is smaller than the actual size of the whole rsvd_page
*/
if (total_page == 0) {
if (rsvd_pkt->type != RSVD_BEACON) {
rtw_err(rtwdev, "first page should be a beacon\n");
goto release_skb;
}
total_page += rtw_len_to_page(iter->len + tx_desc_sz,
page_size);
else
} else {
total_page += rtw_len_to_page(iter->len, page_size);
}
}
if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
@@ -647,13 +665,24 @@
if (!buf)
goto release_skb;
/* Copy the content of each rsvd_pkt to the buf, and they should
* be aligned to the pages.
*
* Note that the first rsvd_pkt is a beacon no matter what vif->type.
* And that rsvd_pkt does not require tx_desc because when it goes
* through TX path, the TX path will generate one for it.
*/
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
page, buf, rsvd_pkt);
page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
}
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
if (page == 0)
page += rtw_len_to_page(rsvd_pkt->skb->len +
tx_desc_sz, page_size);
else
page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
kfree_skb(rsvd_pkt->skb);
}
return buf;
@@ -706,6 +735,11 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
goto free;
}
/* The last thing is to download the *ONLY* beacon again, because
* the previous tx_desc is to describe the total rsvd page. Download
* the beacon again to replace the TX desc header, and we will get
* a correct tx_desc for the beacon in the rsvd page.
*/
ret = rtw_download_beacon(rtwdev, vif);
if (ret) {
rtw_err(rtwdev, "failed to download beacon\n");

@@ -1048,19 +1048,19 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
/* power on mac to read efuse */
ret = rtw_chip_efuse_enable(rtwdev);
if (ret)
-goto out;
+goto out_unlock;
ret = rtw_parse_efuse_map(rtwdev);
if (ret)
-goto out;
+goto out_disable;
ret = rtw_dump_hw_feature(rtwdev);
if (ret)
-goto out;
+goto out_disable;
ret = rtw_check_supported_rfe(rtwdev);
if (ret)
-goto out;
+goto out_disable;
if (efuse->crystal_cap == 0xff)
efuse->crystal_cap = 0;
@@ -1087,9 +1087,10 @@
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+out_disable:
rtw_chip_efuse_disable(rtwdev);
-out:
+out_unlock:
mutex_unlock(&rtwdev->mutex);
return ret;
}

@@ -235,7 +235,7 @@ static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
{
void *addr;
u32 val;
@@ -250,7 +250,8 @@ static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
return val;
}
static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
size_t size)
{
void *addr;
int ret;
@@ -262,19 +263,19 @@ static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
dev_err(&pcie->pdev->dev, "write CSR address failed\n");
}
static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
{
return csr_read(pcie, off, 0x4);
return mobiveil_csr_read(pcie, off, 0x4);
}
static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
{
csr_write(pcie, val, off, 0x4);
mobiveil_csr_write(pcie, val, off, 0x4);
}
static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
{
return (csr_readl(pcie, LTSSM_STATUS) &
return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
}
@@ -323,7 +324,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
return pcie->config_axi_slave_base + where;
}
@@ -353,13 +354,14 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
chained_irq_enter(chip, desc);
/* read INTx status */
val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
intr_status = val & mask;
/* Handle INTx */
if (intr_status & PAB_INTP_INTX_MASK) {
shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
shifted_status = mobiveil_csr_readl(pcie,
PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
do {
@@ -373,12 +375,13 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
bit);
/* clear interrupt handled */
csr_writel(pcie, 1 << (PAB_INTX_START + bit),
PAB_INTP_AMBA_MISC_STAT);
mobiveil_csr_writel(pcie,
1 << (PAB_INTX_START + bit),
PAB_INTP_AMBA_MISC_STAT);
}
shifted_status = csr_readl(pcie,
PAB_INTP_AMBA_MISC_STAT);
shifted_status = mobiveil_csr_readl(pcie,
PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
} while (shifted_status != 0);
@@ -413,7 +416,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
}
/* Clear the interrupt status */
csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
chained_irq_exit(chip, desc);
}
@@ -474,24 +477,24 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
return;
}
value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
csr_writel(pcie, upper_32_bits(size64),
PAB_EXT_PEX_AMAP_SIZEN(win_num));
mobiveil_csr_writel(pcie, upper_32_bits(size64),
PAB_EXT_PEX_AMAP_SIZEN(win_num));
csr_writel(pcie, lower_32_bits(cpu_addr),
PAB_PEX_AMAP_AXI_WIN(win_num));
csr_writel(pcie, upper_32_bits(cpu_addr),
PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
PAB_PEX_AMAP_AXI_WIN(win_num));
mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
csr_writel(pcie, lower_32_bits(pci_addr),
PAB_PEX_AMAP_PEX_WIN_L(win_num));
csr_writel(pcie, upper_32_bits(pci_addr),
PAB_PEX_AMAP_PEX_WIN_H(win_num));
mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
PAB_PEX_AMAP_PEX_WIN_L(win_num));
mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
PAB_PEX_AMAP_PEX_WIN_H(win_num));
pcie->ib_wins_configured++;
}
@@ -515,27 +518,29 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
* program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
* to 4 KB in PAB_AXI_AMAP_CTRL register
*/
value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
mobiveil_csr_writel(pcie, upper_32_bits(size64),
PAB_EXT_AXI_AMAP_SIZE(win_num));
/*
* program AXI window base with appropriate value in
* PAB_AXI_AMAP_AXI_WIN0 register
*/
csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
PAB_AXI_AMAP_AXI_WIN(win_num));
csr_writel(pcie, upper_32_bits(cpu_addr),
PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
mobiveil_csr_writel(pcie,
lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
PAB_AXI_AMAP_AXI_WIN(win_num));
mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
csr_writel(pcie, lower_32_bits(pci_addr),
PAB_AXI_AMAP_PEX_WIN_L(win_num));
csr_writel(pcie, upper_32_bits(pci_addr),
PAB_AXI_AMAP_PEX_WIN_H(win_num));
mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
PAB_AXI_AMAP_PEX_WIN_L(win_num));
mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
PAB_AXI_AMAP_PEX_WIN_H(win_num));
pcie->ob_wins_configured++;
}
@@ -579,42 +584,42 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
struct resource_entry *win;
/* setup bus numbers */
value = csr_readl(pcie, PCI_PRIMARY_BUS);
value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
value &= 0xff000000;
value |= 0x00ff0100;
csr_writel(pcie, value, PCI_PRIMARY_BUS);
mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
*/
value = csr_readl(pcie, PCI_COMMAND);
value = mobiveil_csr_readl(pcie, PCI_COMMAND);
value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
csr_writel(pcie, value, PCI_COMMAND);
mobiveil_csr_writel(pcie, value, PCI_COMMAND);
/*
* program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
* register
*/
pab_ctrl = csr_readl(pcie, PAB_CTRL);
pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
csr_writel(pcie, pab_ctrl, PAB_CTRL);
mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
PAB_INTP_AMBA_MISC_ENB);
mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
PAB_INTP_AMBA_MISC_ENB);
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
*/
value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
value |= APIO_EN_MASK;
csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
/* Enable PCIe PIO master */
value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
value |= 1 << PIO_ENABLE_SHIFT;
csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
/*
* we'll program one outbound window for config reads and
@@ -647,10 +652,10 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
}
/* fixup for PCIe class register */
value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value &= 0xff;
value |= (PCI_CLASS_BRIDGE_PCI << 16);
csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
@@ -668,9 +673,9 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -684,9 +689,9 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}


@@ -323,7 +323,8 @@ static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
goto err_disable_pdi_clk;
/* Check if we are in "startup ready" status */
if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
ret = ltq_vrx200_pcie_phy_wait_for_pll(phy);
if (ret)
goto err_disable_phy_clk;
ltq_vrx200_pcie_phy_apply_workarounds(phy);
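The hunk above keeps the return value of ltq_vrx200_pcie_phy_wait_for_pll() instead of discarding it, so the caller now sees the real error code. A minimal userspace sketch of the same error-propagation idiom; wait_for_pll() and power_on() are hypothetical stand-ins, not the driver's API:

    #include <stdio.h>

    /* Hypothetical stand-in for the PLL-wait helper; pretend -EIO. */
    static int wait_for_pll(void) { return -5; }

    static int power_on(void)
    {
        int ret;

        ret = wait_for_pll();   /* keep the error code, don't just test it */
        if (ret)
            return ret;         /* the caller now sees -5, not a stale value */
        return 0;
    }

    int main(void)
    {
        printf("power_on() = %d\n", power_on());
        return 0;
    }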


@@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
rate = (rate / 1000) * 1000;
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate && !cfg->fracdiv)
break;
@@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
rate = (rate / 1000) * 1000;
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate)
break;
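Both round_rate hunks add the same kHz truncation before walking the pre-PLL table, so a requested rate carrying sub-kHz noise still matches an exact table entry. A tiny demonstration of the idiom with a made-up rate:

    #include <stdio.h>

    int main(void)
    {
        unsigned long rate = 148500001;  /* request with sub-kHz noise */

        rate = (rate / 1000) * 1000;     /* truncate to a whole-kHz multiple */
        printf("%lu\n", rate);           /* prints 148500000 */
        return 0;
    }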


@@ -69,11 +69,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_RXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
rgmii_id = 1;


@@ -406,8 +406,8 @@ static int telem_device_remove(struct platform_device *pdev)
struct telem_device_data *dev_data = platform_get_drvdata(pdev);
cdev_device_del(&dev_data->cdev, &dev_data->dev);
put_device(&dev_data->dev);
ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt));
put_device(&dev_data->dev);
return 0;
}
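The reorder above matters because put_device() can drop the last reference and free the structure that dev_data->dev.devt lives in, so the minor number must be released first. A hypothetical userspace sketch of the rule — read everything cleanup needs before the final put:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
        int minor;              /* analogous to dev_data->dev.devt */
    };

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0)
            free(o);            /* the release callback frees the container */
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        o->refcount = 1;
        o->minor = 42;

        int minor = o->minor;   /* read what cleanup needs... */
        obj_put(o);             /* ...before dropping the last reference */
        printf("released minor %d\n", minor);
        return 0;
    }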


@@ -741,3 +741,4 @@ module_platform_driver(bd70528_power);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 power-supply driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bd70528-power");


@@ -137,10 +137,10 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm));
tmp = prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
tmp = prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
}
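The (u64) casts above force the multiplication itself into 64 bits; without them the product is computed in the operands' narrower type and can wrap before the assignment widens it. A sketch of the difference, assuming 32-bit operands as on a 32-bit kernel:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t NSEC_PER_SEC = 1000000000U;
        uint32_t prescaler = 120, cycles = 3;

        /* All-32-bit product wraps: 120 * 1e9 already exceeds UINT32_MAX. */
        uint64_t wrapped = prescaler * NSEC_PER_SEC * cycles;

        /* Promoting one operand first keeps the whole chain in 64 bits. */
        uint64_t exact = (uint64_t)prescaler * NSEC_PER_SEC * cycles;

        printf("wrapped=%llu exact=%llu\n",
               (unsigned long long)wrapped, (unsigned long long)exact);
        return 0;
    }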


@@ -285,3 +285,4 @@ module_platform_driver(bd70528_regulator);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 voltage regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bd70528-pmic");


@@ -491,4 +491,4 @@ module_platform_driver(bd70528_rtc);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platofrm:bd70528-rtc");
MODULE_ALIAS("platform:bd70528-rtc");


@@ -740,8 +740,10 @@ static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
kapqns = kmalloc(nbytes, GFP_KERNEL);
if (!kapqns)
return ERR_PTR(-ENOMEM);
if (copy_from_user(kapqns, uapqns, nbytes))
if (copy_from_user(kapqns, uapqns, nbytes)) {
kfree(kapqns);
return ERR_PTR(-EFAULT);
}
}
return kapqns;
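The added braces close a leak: when copy_from_user() fails, the freshly allocated buffer must be freed before returning. The same shape in plain userspace C, with read() standing in for copy_from_user() and read_blob() a hypothetical helper:

    #include <errno.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void *read_blob(int fd, size_t nbytes)
    {
        char *buf = malloc(nbytes);

        if (!buf)
            return NULL;
        if (read(fd, buf, nbytes) != (ssize_t)nbytes) {
            free(buf);          /* without this, the buffer leaks */
            errno = EFAULT;
            return NULL;
        }
        return buf;
    }

    int main(void)
    {
        void *p = read_blob(0, 1);  /* try to read one byte from stdin */

        free(p);
        return 0;
    }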


@@ -620,6 +620,8 @@ struct qeth_ipato {
struct qeth_channel {
struct ccw_device *ccwdev;
struct qeth_cmd_buffer *active_cmd;
enum qeth_channel_states state;
atomic_t irq_pending;
};
@@ -1024,6 +1025,8 @@ int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
int qeth_stop_channel(struct qeth_channel *channel);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,


@@ -515,7 +515,9 @@ static int __qeth_issue_next_read(struct qeth_card *card)
QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
if (rc) {
if (!rc) {
channel->active_cmd = iob;
} else {
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
@@ -986,8 +988,21 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
QETH_CARD_TEXT(card, 5, "data");
}
if (qeth_intparm_is_iob(intparm))
iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
if (intparm == 0) {
QETH_CARD_TEXT(card, 5, "irqunsol");
} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
QETH_CARD_TEXT(card, 5, "irqunexp");
dev_err(&cdev->dev,
"Received IRQ with intparm %lx, expected %px\n",
intparm, channel->active_cmd);
if (channel->active_cmd)
qeth_cancel_cmd(channel->active_cmd, -EIO);
} else {
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
channel->active_cmd = NULL;
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
@@ -1007,15 +1022,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
channel->state = CH_STATE_HALTED;
if (intparm == QETH_CLEAR_CHANNEL_PARM) {
QETH_CARD_TEXT(card, 6, "clrchpar");
/* we don't have to handle this further */
intparm = 0;
}
if (intparm == QETH_HALT_CHANNEL_PARM) {
QETH_CARD_TEXT(card, 6, "hltchpar");
/* we don't have to handle this further */
intparm = 0;
if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
SCSW_FCTL_HALT_FUNC))) {
qeth_cancel_cmd(iob, -ECANCELED);
iob = NULL;
}
cstat = irb->scsw.cmd.cstat;
@@ -1408,7 +1418,7 @@ static int qeth_clear_channel(struct qeth_card *card,
QETH_CARD_TEXT(card, 3, "clearch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc)
@@ -1430,7 +1440,7 @@ static int qeth_halt_channel(struct qeth_card *card,
QETH_CARD_TEXT(card, 3, "haltch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc)
@@ -1444,6 +1454,25 @@
return 0;
}
int qeth_stop_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
int rc;
rc = ccw_device_set_offline(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
if (channel->active_cmd) {
dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
channel->active_cmd);
channel->active_cmd = NULL;
}
spin_unlock_irq(get_ccwdev_lock(cdev));
return rc;
}
EXPORT_SYMBOL_GPL(qeth_stop_channel);
static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
@@ -1747,6 +1776,8 @@ static int qeth_send_control_data(struct qeth_card *card,
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
if (!rc)
channel->active_cmd = iob;
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -4625,12 +4656,12 @@ EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
static void qeth_determine_capabilities(struct qeth_card *card)
{
struct qeth_channel *channel = &card->data;
struct ccw_device *ddev = channel->ccwdev;
int rc;
struct ccw_device *ddev;
int ddev_offline = 0;
QETH_CARD_TEXT(card, 2, "detcapab");
ddev = CARD_DDEV(card);
if (!ddev->online) {
ddev_offline = 1;
rc = ccw_device_set_online(ddev);
@@ -4669,7 +4700,7 @@ static void qeth_determine_capabilities(struct qeth_card *card)
out_offline:
if (ddev_offline == 1)
ccw_device_set_offline(ddev);
qeth_stop_channel(channel);
out:
return;
}
@@ -4870,9 +4901,9 @@ retry:
QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
CARD_DEVID(card));
rc = qeth_qdio_clear_card(card, !IS_IQD(card));
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qeth_stop_channel(&card->data);
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
rc = ccw_device_set_online(CARD_RDEV(card));
if (rc)


@@ -28,20 +28,6 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_TIMEOUT (10 * HZ)
#define QETH_IPA_TIMEOUT (45 * HZ)
#define QETH_CLEAR_CHANNEL_PARM -10
#define QETH_HALT_CHANNEL_PARM -11
static inline bool qeth_intparm_is_iob(unsigned long intparm)
{
switch (intparm) {
case QETH_CLEAR_CHANNEL_PARM:
case QETH_HALT_CHANNEL_PARM:
case 0:
return false;
}
return true;
}
/*****************************************************************************/
/* IP Assist related definitions */
/*****************************************************************************/
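The qeth rework above replaces the magic QETH_CLEAR/HALT_CHANNEL_PARM tokens with a per-channel active_cmd pointer: the interrupt handler matches the completion token against the remembered request and treats anything else as unsolicited or unexpected. A toy model of that bookkeeping, with all names hypothetical:

    #include <stdio.h>

    struct request { int id; };

    struct channel {
        struct request *active_cmd;   /* NULL while the channel is idle */
    };

    static int start_io(struct channel *ch, struct request *req)
    {
        if (ch->active_cmd)
            return -1;                /* busy */
        ch->active_cmd = req;         /* remember it once the start succeeds */
        return 0;
    }

    static void irq_handler(struct channel *ch, struct request *token)
    {
        if (!token)
            printf("unsolicited interrupt\n");
        else if (token != ch->active_cmd)
            printf("unexpected token, cancelling active cmd\n");
        else
            printf("request %d completed\n", token->id);
        ch->active_cmd = NULL;
    }

    int main(void)
    {
        struct channel ch = { 0 };
        struct request r = { .id = 1 };

        if (start_io(&ch, &r) == 0)
            irq_handler(&ch, &r);
        return 0;
    }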


@@ -877,9 +877,9 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
out_remove:
qeth_l2_stop_card(card);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qeth_stop_channel(&card->data);
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
@@ -910,9 +910,9 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
rtnl_unlock();
qeth_l2_stop_card(card);
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
rc3 = qeth_stop_channel(&card->read);
if (!rc)
rc = (rc2) ? rc2 : rc3;
if (rc)


@@ -2383,9 +2383,9 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
return 0;
out_remove:
qeth_l3_stop_card(card);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qeth_stop_channel(&card->data);
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
@@ -2421,9 +2421,10 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
rtnl_unlock();
}
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
rc3 = qeth_stop_channel(&card->read);
if (!rc)
rc = (rc2) ? rc2 : rc3;
if (rc)


@@ -6926,23 +6926,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
&hba->desc_size.geom_desc);
if (err)
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
&hba->desc_size.hlth_desc);
if (err)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -8440,9 +8430,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
/* Set descriptor lengths to specification defaults */
ufshcd_def_desc_sizes(hba);
err = ufshcd_hba_init(hba);
if (err)
goto out_error;


@@ -97,13 +97,13 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
return ret ? ret : copied;
}
static unsigned int snoop_file_poll(struct file *file,
static __poll_t snoop_file_poll(struct file *file,
struct poll_table_struct *pt)
{
struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
poll_wait(file, &chan->wq, pt);
return !kfifo_is_empty(&chan->fifo) ? POLLIN : 0;
return !kfifo_is_empty(&chan->fifo) ? EPOLLIN : 0;
}
static const struct file_operations snoop_fops = {


@@ -48,7 +48,7 @@
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
static const struct regmap_config llcc_regmap_config = {
static struct regmap_config llcc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -323,6 +323,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
if (IS_ERR(base))
return ERR_CAST(base);
llcc_regmap_config.name = name;
return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
}


@@ -326,7 +326,7 @@ static int __init renesas_soc_init(void)
if (np) {
chipid = of_iomap(np, 0);
of_node_put(np);
} else if (soc->id) {
} else if (soc->id && family->reg) {
chipid = ioremap(family->reg, 4);
}
if (chipid) {


@@ -1899,6 +1899,20 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
event->id,
&pmc->irq, pmc);
/*
* GPIOs don't have an equivalent interrupt in the
* parent controller (GIC). However some code, such
* as the one in irq_get_irqchip_state(), require a
* valid IRQ chip to be set. Make sure that's the
* case by passing NULL here, which will install a
* dummy IRQ chip for the interrupt in the parent
* domain.
*/
if (domain->parent)
irq_domain_set_hwirq_and_chip(domain->parent,
virq, 0, NULL,
NULL);
break;
}
}
@@ -1908,10 +1922,22 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
* dummy hardware IRQ number. This is used in the ->irq_set_type()
* and ->irq_set_wake() callbacks to return early for these IRQs.
*/
if (i == soc->num_wake_events)
if (i == soc->num_wake_events) {
err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
&pmc->irq, pmc);
/*
* Interrupts without a wake event don't have a corresponding
* interrupt in the parent controller (GIC). Pass NULL for the
* chip here, which causes a dummy IRQ chip to be installed
* for the interrupt in the parent domain, to make this
* explicit.
*/
if (domain->parent)
irq_domain_set_hwirq_and_chip(domain->parent, virq, 0,
NULL, NULL);
}
return err;
}


@@ -554,6 +554,13 @@ static int check_mem_type(unsigned long start, size_t num_pages)
struct mm_struct *mm = current->mm;
int rc;
/*
* Allow kernel address to register with OP-TEE as kernel
* pages are configured as normal memory only.
*/
if (virt_addr_valid(start))
return 0;
down_read(&mm->mmap_sem);
rc = __check_mem_type(find_vma(mm, start),
start + num_pages * PAGE_SIZE);


@@ -643,11 +643,6 @@ static struct optee *optee_probe(struct device_node *np)
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
rc = optee_enumerate_devices();
if (rc)
goto err;
pr_info("initialized driver\n");
return optee;
err:
if (optee) {
@@ -702,9 +697,10 @@ static struct optee *optee_svc;
static int __init optee_driver_init(void)
{
struct device_node *fw_np;
struct device_node *np;
struct optee *optee;
struct device_node *fw_np = NULL;
struct device_node *np = NULL;
struct optee *optee = NULL;
int rc = 0;
/* Node is supposed to be below /firmware */
fw_np = of_find_node_by_name(NULL, "firmware");
@@ -723,6 +719,14 @@ static int __init optee_driver_init(void)
if (IS_ERR(optee))
return PTR_ERR(optee);
rc = optee_enumerate_devices();
if (rc) {
optee_remove(optee);
return rc;
}
pr_info("initialized driver\n");
optee_svc = optee;
return 0;
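The reordering above runs optee_enumerate_devices() only after the core object is fully constructed, and tears everything down if that follow-up step fails. A compact sketch of the pattern with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct driver { int ready; };

    static void driver_remove(struct driver *d) { free(d); }

    static int enumerate_devices(struct driver *d)
    {
        return d->ready ? 0 : -1;     /* needs a fully set-up driver */
    }

    static struct driver *driver_init(void)
    {
        struct driver *d = calloc(1, sizeof(*d));

        if (!d)
            return NULL;
        d->ready = 1;                 /* finish core setup first */

        if (enumerate_devices(d)) {   /* then the dependent step... */
            driver_remove(d);         /* ...with rollback on failure */
            return NULL;
        }
        return d;
    }

    int main(void)
    {
        struct driver *d = driver_init();

        printf("init %s\n", d ? "ok" : "failed");
        driver_remove(d);
        return 0;
    }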


@@ -17,6 +17,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
{
unsigned int order = get_order(size);
struct page *page;
int rc = 0;
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
@@ -26,12 +27,34 @@
shm->paddr = page_to_phys(page);
shm->size = PAGE_SIZE << order;
return 0;
if (shm->flags & TEE_SHM_DMA_BUF) {
unsigned int nr_pages = 1 << order, i;
struct page **pages;
pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
for (i = 0; i < nr_pages; i++) {
pages[i] = page;
page++;
}
shm->flags |= TEE_SHM_REGISTER;
rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
kfree(pages);
}
return rc;
}
static void pool_op_free(struct tee_shm_pool_mgr *poolm,
struct tee_shm *shm)
{
if (shm->flags & TEE_SHM_DMA_BUF)
optee_shm_unregister(shm->ctx, shm);
free_pages((unsigned long)shm->kaddr, get_order(shm->size));
shm->kaddr = NULL;
}
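The allocation hunk above describes one physically contiguous order-N block to the secure world as 2^N individual pages. A rough userspace sketch of building such a page array from a contiguous block; PAGE_SIZE and the names are assumptions for illustration only:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        unsigned int order = 2, nr_pages = 1u << order, i;
        char *block = aligned_alloc(PAGE_SIZE, (size_t)nr_pages * PAGE_SIZE);
        char **pages = calloc(nr_pages, sizeof(*pages));

        if (!block || !pages)
            return 1;

        for (i = 0; i < nr_pages; i++)
            pages[i] = block + (size_t)i * PAGE_SIZE;  /* one entry per page */

        printf("described %u pages\n", nr_pages);
        free(pages);
        free(block);
        return 0;
    }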


@@ -327,10 +327,9 @@ static int sprd_wdt_probe(struct platform_device *pdev)
static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
struct sprd_wdt *wdt = dev_get_drvdata(dev);
if (watchdog_active(wdd))
if (watchdog_active(&wdt->wdd))
sprd_wdt_stop(&wdt->wdd);
sprd_wdt_disable(wdt);
@@ -339,7 +338,6 @@ static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
struct sprd_wdt *wdt = dev_get_drvdata(dev);
int ret;
@@ -347,7 +345,7 @@
if (ret)
return ret;
if (watchdog_active(wdd)) {
if (watchdog_active(&wdt->wdd)) {
ret = sprd_wdt_start(&wdt->wdd);
if (ret) {
sprd_wdt_disable(wdt);


@@ -68,13 +68,11 @@ static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_
static void afs_set_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
u64 mask, before, after;
u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
before = *(u64 *)block->hdr.bitmap;
block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
@@ -83,8 +81,6 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
after = *(u64 *)block->hdr.bitmap;
}
/*
@@ -93,13 +89,11 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
u64 mask, before, after;
u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
before = *(u64 *)block->hdr.bitmap;
block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
@@ -108,8 +102,6 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
after = *(u64 *)block->hdr.bitmap;
}
/*


@@ -73,6 +73,7 @@ config NFSD_V4
select NFSD_V3
select FS_POSIX_ACL
select SUNRPC_GSS
select CRYPTO_MD5
select CRYPTO_SHA256
select GRACE_PERIOD
help


@@ -201,6 +201,9 @@ xfs_fs_rm_xquota(
if (XFS_IS_QUOTA_ON(mp))
return -EINVAL;
if (uflags & ~(FS_USER_QUOTA | FS_GROUP_QUOTA | FS_PROJ_QUOTA))
return -EINVAL;
if (uflags & FS_USER_QUOTA)
flags |= XFS_DQ_USER;
if (uflags & FS_GROUP_QUOTA)


@@ -71,6 +71,8 @@
#define SDIO_VENDOR_ID_TI 0x0097
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#define SDIO_VENDOR_ID_TI_WL1251 0x104c
#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#define SDIO_VENDOR_ID_STE 0x0020
#define SDIO_DEVICE_ID_STE_CW1200 0x2280


@@ -74,7 +74,7 @@ struct bpf_dtab_netdev {
struct bpf_dtab {
struct bpf_map map;
struct bpf_dtab_netdev **netdev_map;
struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
struct list_head __percpu *flush_list;
struct list_head list;
@@ -101,6 +101,12 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries)
return hash;
}
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
int idx)
{
return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
int err, cpu;
@@ -120,8 +126,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
bpf_map_init_from_attr(&dtab->map, attr);
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
cost += sizeof(struct list_head) * num_possible_cpus();
cost = (u64) sizeof(struct list_head) * num_possible_cpus();
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
@@ -129,6 +134,8 @@
if (!dtab->n_buckets) /* Overflow check */
return -EINVAL;
cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
} else {
cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
}
/* if map size is larger than memlock limit, reject it */
@@ -143,24 +150,22 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)
goto free_percpu;
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
if (!dtab->dev_index_head)
goto free_map_area;
goto free_percpu;
spin_lock_init(&dtab->index_lock);
} else {
dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)
goto free_percpu;
}
return 0;
free_map_area:
bpf_map_area_free(dtab->netdev_map);
free_percpu:
free_percpu(dtab->flush_list);
free_charge:
@@ -228,21 +233,40 @@ static void dev_map_free(struct bpf_map *map)
cond_resched();
}
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
for (i = 0; i < dtab->n_buckets; i++) {
struct bpf_dtab_netdev *dev;
struct hlist_head *head;
struct hlist_node *next;
dev = dtab->netdev_map[i];
if (!dev)
continue;
head = dev_map_index_hash(dtab, i);
free_percpu(dev->bulkq);
dev_put(dev->dev);
kfree(dev);
hlist_for_each_entry_safe(dev, next, head, index_hlist) {
hlist_del_rcu(&dev->index_hlist);
free_percpu(dev->bulkq);
dev_put(dev->dev);
kfree(dev);
}
}
kfree(dtab->dev_index_head);
} else {
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
dev = dtab->netdev_map[i];
if (!dev)
continue;
free_percpu(dev->bulkq);
dev_put(dev->dev);
kfree(dev);
}
bpf_map_area_free(dtab->netdev_map);
}
free_percpu(dtab->flush_list);
bpf_map_area_free(dtab->netdev_map);
kfree(dtab->dev_index_head);
kfree(dtab);
}
@@ -263,12 +287,6 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
int idx)
{
return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);


@@ -375,7 +375,7 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
{
dma_addr_t dma_addr = paddr;
if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
if (unlikely(!dma_capable(dev, dma_addr, size))) {
report_addr(dev, dma_addr, size);
return DMA_MAPPING_ERROR;
}


@@ -1946,7 +1946,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
int __maybe_unused cpu = my_rdp->cpu;
unsigned long cur_gp_seq;
unsigned long flags;
bool gotcbs;
bool gotcbs = false;
unsigned long j = jiffies;
bool needwait_gp = false; // This prevents actual uninitialized use.
bool needwake;


@@ -1743,13 +1743,16 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
}
#endif
static void set_next_task_dl(struct rq *rq, struct task_struct *p)
static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
p->se.exec_start = rq_clock_task(rq);
/* You can't push away the running task */
dequeue_pushable_dl_task(rq, p);
if (!first)
return;
if (hrtick_enabled(rq))
start_hrtick_dl(rq, p);
@@ -1785,7 +1788,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
set_next_task_dl(rq, p);
set_next_task_dl(rq, p, true);
return p;
}
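The new `first` argument lets the class hooks tell a genuine pick (which should arm the hrtick and similar per-pick state) apart from a mere reassertion of the current task, which only needs cheap bookkeeping. A stripped-down model of that calling convention; the types here are hypothetical, not the scheduler's:

    #include <stdbool.h>
    #include <stdio.h>

    struct rq;                          /* opaque in this sketch */

    struct sched_class_like {
        void (*set_next_task)(struct rq *rq, int task, bool first);
    };

    static void set_next_task_demo(struct rq *rq, int task, bool first)
    {
        (void)rq;
        printf("task %d is now current\n", task);
        if (!first)
            return;                     /* cheap path: bookkeeping only */
        printf("task %d: arming per-pick state\n", task);
    }

    int main(void)
    {
        struct sched_class_like c = { .set_next_task = set_next_task_demo };

        c.set_next_task(NULL, 1, true);     /* a real pick */
        c.set_next_task(NULL, 1, false);    /* merely re-set as current */
        return 0;
    }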


@@ -3504,9 +3504,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif
if (decayed)
cfs_rq_util_change(cfs_rq, 0);
return decayed;
}
@@ -3616,8 +3613,12 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
update_tg_load_avg(cfs_rq, 0);
} else if (decayed && (flags & UPDATE_TG))
update_tg_load_avg(cfs_rq, 0);
} else if (decayed) {
cfs_rq_util_change(cfs_rq, 0);
if (flags & UPDATE_TG)
update_tg_load_avg(cfs_rq, 0);
}
}
#ifndef CONFIG_64BIT
@@ -7578,6 +7579,28 @@ static inline bool others_have_blocked(struct rq *rq) { return false; }
static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
#endif
static bool __update_blocked_others(struct rq *rq, bool *done)
{
const struct sched_class *curr_class;
u64 now = rq_clock_pelt(rq);
bool decayed;
/*
* update_load_avg() can call cpufreq_update_util(). Make sure that RT,
* DL and IRQ signals have been updated before updating CFS.
*/
curr_class = rq->curr->sched_class;
decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
update_irq_load_avg(rq, 0);
if (others_have_blocked(rq))
*done = false;
return decayed;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7597,29 +7620,11 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
return true;
}
static void update_blocked_averages(int cpu)
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq, *pos;
const struct sched_class *curr_class;
struct rq_flags rf;
bool done = true;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
/*
* update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
* that RT, DL and IRQ signals have been updated before updating CFS.
*/
curr_class = rq->curr->sched_class;
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
update_irq_load_avg(rq, 0);
/* Don't need periodic decay once load/util_avg are null */
if (others_have_blocked(rq))
done = false;
bool decayed = false;
int cpu = cpu_of(rq);
/*
* Iterates the task_group tree in a bottom up fashion, see
@@ -7628,9 +7633,13 @@ static void update_blocked_averages(int cpu)
for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
struct sched_entity *se;
if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
update_tg_load_avg(cfs_rq, 0);
if (cfs_rq == &rq->cfs)
decayed = true;
}
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
@@ -7645,11 +7654,10 @@
/* Don't need periodic decay once load/util_avg are null */
if (cfs_rq_has_blocked(cfs_rq))
done = false;
*done = false;
}
update_blocked_load_status(rq, !done);
rq_unlock_irqrestore(rq, &rf);
return decayed;
}
/*
@@ -7699,29 +7707,16 @@ static unsigned long task_h_load(struct task_struct *p)
cfs_rq_load_avg(cfs_rq) + 1);
}
#else
static inline void update_blocked_averages(int cpu)
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq = &rq->cfs;
const struct sched_class *curr_class;
struct rq_flags rf;
bool decayed;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
if (cfs_rq_has_blocked(cfs_rq))
*done = false;
/*
* update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
* that RT, DL and IRQ signals have been updated before updating CFS.
*/
curr_class = rq->curr->sched_class;
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
update_irq_load_avg(rq, 0);
update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
rq_unlock_irqrestore(rq, &rf);
return decayed;
}
static unsigned long task_h_load(struct task_struct *p)
@@ -7730,6 +7725,24 @@ static unsigned long task_h_load(struct task_struct *p)
}
#endif
static void update_blocked_averages(int cpu)
{
bool decayed = false, done = true;
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
decayed |= __update_blocked_others(rq, &done);
decayed |= __update_blocked_fair(rq, &done);
update_blocked_load_status(rq, !done);
if (decayed)
cpufreq_update_util(rq, 0);
rq_unlock_irqrestore(rq, &rf);
}
/********** Helpers for find_busiest_group ************************/
/*
@@ -10277,7 +10290,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
* This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes.
*/
static void set_next_task_fair(struct rq *rq, struct task_struct *p)
static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
struct sched_entity *se = &p->se;


@@ -385,7 +385,7 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
static void set_next_task_idle(struct rq *rq, struct task_struct *next)
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
update_idle_core(rq);
schedstat_inc(rq->sched_goidle);
@@ -399,7 +399,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
if (prev)
put_prev_task(rq, prev);
set_next_task_idle(rq, next);
set_next_task_idle(rq, next, true);
return next;
}


@@ -1515,13 +1515,16 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
#endif
}
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p)
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
p->se.exec_start = rq_clock_task(rq);
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
if (!first)
return;
/*
* If prev task was rt, put_prev_task() has already updated the
* utilization. We only care of the case where we start to schedule a
@@ -1575,7 +1578,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
return NULL;
p = _pick_next_task_rt(rq);
set_next_task_rt(rq, p);
set_next_task_rt(rq, p, true);
return p;
}


@@ -1738,7 +1738,7 @@ struct sched_class {
struct task_struct *prev,
struct rq_flags *rf);
void (*put_prev_task)(struct rq *rq, struct task_struct *p);
void (*set_next_task)(struct rq *rq, struct task_struct *p);
void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
#ifdef CONFIG_SMP
int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
@@ -1790,7 +1790,7 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
WARN_ON_ONCE(rq->curr != next);
next->sched_class->set_next_task(rq, next);
next->sched_class->set_next_task(rq, next, false);
}
#ifdef CONFIG_SMP


@@ -29,7 +29,7 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
/* we're never preempted */
}
static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
stop->se.exec_start = rq_clock_task(rq);
}
@@ -42,7 +42,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
if (!sched_stop_runnable(rq))
return NULL;
set_next_task_stop(rq, rq->stop);
set_next_task_stop(rq, rq->stop, true);
return rq->stop;
}


@@ -425,7 +425,8 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* ignored.
*/
#define for_each_pwq(pwq, wq) \
list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
lockdep_is_held(&wq->mutex)) \
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else


@@ -2049,8 +2049,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
goto nla_put_failure;
{
unsigned long now = jiffies;
unsigned int flush_delta = now - tbl->last_flush;
unsigned int rand_delta = now - tbl->last_rand;
long flush_delta = now - tbl->last_flush;
long rand_delta = now - tbl->last_rand;
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
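Storing a jiffies difference in a 32-bit unsigned int silently drops the high bits on 64-bit systems once the delta exceeds 2^32 ticks; `long` keeps the full value. A compact demonstration, assuming an LP64 target:

    #include <stdio.h>

    int main(void)
    {
        unsigned long now  = (1UL << 32) + 10;  /* pretend jiffies */
        unsigned long last = 4;

        unsigned int bad  = now - last;         /* truncates to 6 */
        long         good = now - last;         /* keeps the full delta */

        printf("bad=%u good=%ld\n", bad, good);
        return 0;
    }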


@@ -1462,14 +1462,17 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
struct kobject *kobj = &queue->kobj;
int error = 0;
/* Kobject_put later will trigger netdev_queue_release call
* which decreases dev refcount: Take that reference here
*/
dev_hold(queue->dev);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
"tx-%u", index);
if (error)
goto err;
dev_hold(queue->dev);
#ifdef CONFIG_BQL
error = sysfs_create_group(kobj, &dql_group);
if (error)


@@ -1667,6 +1667,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_dp_reset_user_features(skb, info);
}
ovs_unlock();
goto err_destroy_meters;
}
@@ -1683,7 +1684,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
return 0;
err_destroy_meters:
ovs_unlock();
ovs_meters_exit(dp);
err_destroy_ports_array:
kfree(dp->ports);


@@ -1296,15 +1296,21 @@ static void packet_sock_destruct(struct sock *sk)
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
u32 rxhash;
u32 *history = po->rollover->history;
u32 victim, rxhash;
int i, count = 0;
rxhash = skb_get_hash(skb);
for (i = 0; i < ROLLOVER_HLEN; i++)
if (po->rollover->history[i] == rxhash)
if (READ_ONCE(history[i]) == rxhash)
count++;
po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
victim = prandom_u32() % ROLLOVER_HLEN;
/* Avoid dirtying the cache line if possible */
if (READ_ONCE(history[victim]) != rxhash)
WRITE_ONCE(history[victim], rxhash);
return count > (ROLLOVER_HLEN >> 1);
}
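The rollover history is read and written locklessly from several CPUs, so the patch wraps each access in READ_ONCE()/WRITE_ONCE() and skips the store when the slot already holds the hash, sparing a needless dirty cache line. A rough userspace analogue using C11 relaxed atomics in place of the kernel macros:

    #include <stdatomic.h>
    #include <stdio.h>

    #define HLEN 16

    static _Atomic unsigned int history[HLEN];

    static int flow_is_huge(unsigned int rxhash, unsigned int victim)
    {
        int i, count = 0;

        for (i = 0; i < HLEN; i++)
            if (atomic_load_explicit(&history[i],
                                     memory_order_relaxed) == rxhash)
                count++;

        /* Avoid dirtying the cache line when the value is unchanged. */
        if (atomic_load_explicit(&history[victim],
                                 memory_order_relaxed) != rxhash)
            atomic_store_explicit(&history[victim], rxhash,
                                  memory_order_relaxed);

        return count > (HLEN >> 1);
    }

    int main(void)
    {
        printf("%d\n", flow_is_huge(0xabcd, 3));
        return 0;
    }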
