This is the 5.4.190 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmJftBAACgkQONu9yGCS
aT4f7RAA1/eeQcfKsPYN7I2ToM1F6aB51wYt1Xj0ObYcHM/lm2JWzDu2UB+fTpem
rBKvoeA+/xb++vkxBXHpJTK6TIuYder0rGcgnTmbhQPpAb37T22n5P666STRoZV2
0AN0pzFVH+LjdZcPvfHCO/xmI3Z6ay3uWwp0G4tNUUdhpl/K/3dludP8yxX4EBaD
UJOKVRWp16rcSj4NtOKjrEADeKymqnsUnjEB5KU3gEfqaDhwEeZc9rw5zWZvRIZ7
9zJkQcHAMWi2oA/wPLbiNF+Be20K1hqT8UV8WgrRyLS8JJuACZodDBchftXYwuQq
IqKMbpj+8XS9Yqxujgc+NVDOi5l4vg9Kol4LiHfax/LtRuc+DyqxZimRzVHi/Joz
/+lx3urUKzhRPNPR0fUhxwpoOYxilmI0N+ahr40PT+nq0eVOXXwTd8balmhxCpc6
1ssG+g5R0Ij0CblpzEJXodNDkJ00pxRTGRYUmqBwjVMOHt0RTwHfK4qeluPoyC19
X8YdAdrmm4BT9KPUJvStzWIZfKBE+cuho5dCB56e/keg0T9Q98zL9mXPnli0UVOW
oD7DZxOQVaJZV6QqYpkxpeut0zN1Fnyih9lkvgY3Y5dlIGZ5PbIDK4sDmo/5RTZE
Y1xu87ujBcAbDVN6j8TQmj71iikd4qfGI9vvFiHyK5Zg0rSXyfY=
=dDvH
-----END PGP SIGNATURE-----

Merge 5.4.190 into android11-5.4-lts

Changes in 5.4.190
    memory: atmel-ebi: Fix missing of_node_put in atmel_ebi_probe
    net/sched: flower: fix parsing of ethertype following VLAN header
    veth: Ensure eth header is in skb's linear part
    gpiolib: acpi: use correct format characters
    mlxsw: i2c: Fix initialization error flow
    net/sched: fix initialization order when updating chain 0 head
    net: ethernet: stmmac: fix altr_tse_pcs function when using a fixed-link
    net/sched: taprio: Check if socket flags are valid
    cfg80211: hold bss_lock while updating nontrans_list
    drm/msm/dsi: Use connector directly in msm_dsi_manager_connector_init()
    net/smc: Fix NULL pointer dereference in smc_pnet_find_ib()
    sctp: Initialize daddr on peeled off socket
    testing/selftests/mqueue: Fix mq_perf_tests to free the allocated cpu set
    nfc: nci: add flush_workqueue to prevent uaf
    cifs: potential buffer overflow in handling symlinks
    drm/amd: Add USBC connector ID
    drm/amd/display: fix audio format not updated after edid updated
    drm/amd/display: Update VTEM Infopacket definition
    drm/amdkfd: Fix Incorrect VMIDs passed to HWS
    drm/amdkfd: Check for potential null return of kmalloc_array()
    Drivers: hv: vmbus: Prevent load re-ordering when reading ring buffer
    scsi: target: tcmu: Fix possible page UAF
    scsi: ibmvscsis: Increase INITIAL_SRP_LIMIT to 1024
    net: micrel: fix KS8851_MLL Kconfig
    ata: libata-core: Disable READ LOG DMA EXT for Samsung 840 EVOs
    gpu: ipu-v3: Fix dev_dbg frequency output
    regulator: wm8994: Add an off-on delay for WM8994 variant
    arm64: alternatives: mark patch_alternative() as `noinstr`
    tlb: hugetlb: Add more sizes to tlb_remove_huge_tlb_entry
    net: usb: aqc111: Fix out-of-bounds accesses in RX fixup
    drm/amd/display: Fix allocate_mst_payload assert on resume
    powerpc: Fix virt_addr_valid() for 64-bit Book3E & 32-bit
    scsi: mvsas: Add PCI ID of RocketRaid 2640
    scsi: megaraid_sas: Target with invalid LUN ID is deleted during scan
    drivers: net: slip: fix NPD bug in sl_tx_timeout()
    perf/imx_ddr: Fix undefined behavior due to shift overflowing the constant
    mm, page_alloc: fix build_zonerefs_node()
    mm: kmemleak: take a full lowmem check in kmemleak_*_phys()
    gcc-plugins: latent_entropy: use /dev/urandom
    ath9k: Properly clear TX status area before reporting to mac80211
    ath9k: Fix usage of driver-private space in tx_info
    btrfs: remove unused variable in btrfs_{start,write}_dirty_block_groups()
    btrfs: mark resumed async balance as writing
    ALSA: hda/realtek: Add quirk for Clevo PD50PNT
    ALSA: pcm: Test for "silence" field in struct "pcm_format_data"
    ipv6: fix panic when forwarding a pkt with no in6 dev
    drm/amd/display: don't ignore alpha property on pre-multiplied mode
    genirq/affinity: Consider that CPUs on nodes can be unbalanced
    tick/nohz: Use WARN_ON_ONCE() to prevent console saturation
    ARM: davinci: da850-evm: Avoid NULL pointer dereference
    dm integrity: fix memory corruption when tag_size is less than digest size
    smp: Fix offline cpu check in flush_smp_call_function_queue()
    i2c: pasemi: Wait for write xfers to finish
    dma-direct: avoid redundant memory sync for swiotlb
    ax25: add refcount in ax25_dev to avoid UAF bugs
    ax25: fix reference count leaks of ax25_dev
    ax25: fix UAF bugs of net_device caused by rebinding operation
    ax25: Fix refcount leaks caused by ax25_cb_del()
    ax25: fix UAF bug in ax25_send_control()
    ax25: fix NPD bug in ax25_disconnect
    ax25: Fix NULL pointer dereferences in ax25 timers
    ax25: Fix UAF bugs in ax25 timers
    Linux 5.4.190

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I375cb1d55a4a40c1c31b86c87ddb9235cefcb902
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 189
+SUBLEVEL = 190
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
[Per-file diff hunks of the 5.4.190 merge, one for each change listed in the changelog above.]