Merge android-4.19-q.95 (138a4a6) into msm-4.19

* refs/heads/tmp-138a4a6:
  Linux 4.19.95
  usb: missing parentheses in USE_NEW_SCHEME
  USB: serial: option: add Telit ME910G1 0x110a composition
  USB: core: fix check for duplicate endpoints
  usb: dwc3: gadget: Fix request complete check
  net: sch_prio: When ungrafting, replace with FIFO
  mlxsw: spectrum_qdisc: Ignore grafting of invisible FIFO
  vlan: vlan_changelink() should propagate errors
  vlan: fix memory leak in vlan_dev_set_egress_priority
  vxlan: fix tos value before xmit
  tcp: fix "old stuff" D-SACK causing SACK to be treated as D-SACK
  sctp: free cmd->obj.chunk for the unprocessed SCTP_CMD_REPLY
  sch_cake: avoid possible divide by zero in cake_enqueue()
  pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM
  net: usb: lan78xx: fix possible skb leak
  net: stmmac: dwmac-sunxi: Allow all RGMII modes
  net: stmmac: dwmac-sun8i: Allow all RGMII modes
  net: dsa: mv88e6xxx: Preserve priority when setting CPU port.
  macvlan: do not assume mac_header is set in macvlan_broadcast()
  gtp: fix bad unlock balance in gtp_encap_enable_socket
  PCI/switchtec: Read all 64 bits of part_event_bitmap
  ARM: dts: imx6ul: use nvmem-cells for cpu speed grading
  cpufreq: imx6q: read OCOTP through nvmem for imx6ul/imx6ull
  powerpc/spinlocks: Include correct header for static key
  powerpc/vcpu: Assume dedicated processors as non-preempt
  hv_netvsc: Fix unwanted rx_table reset
  llc2: Fix return statement of llc_stat_ev_rx_null_dsap_xid_c (and _test_c)
  parisc: Fix compiler warnings in debug_core.c
  block: fix memleak when __blk_rq_map_user_iov() is failed
  s390/dasd: fix memleak in path handling error case
  s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly
  drm/exynos: gsc: add missed component_del
  s390/purgatory: do not build purgatory with kcov, kasan and friends
  net: stmmac: Always arm TX Timer at end of transmission start
  net: stmmac: RX buffer size must be 16 byte aligned
  net: stmmac: xgmac: Clear previous RX buffer size
  net: stmmac: Do not accept invalid MTU values
  fs: avoid softlockups in s_inodes iterators
  perf/x86/intel: Fix PT PMI handling
  kconfig: don't crash on NULL expressions in expr_eq()
  iommu/iova: Init the struct iova to fix the possible memleak
  regulator: rn5t618: fix module aliases
  ASoC: wm8962: fix lambda value
  rfkill: Fix incorrect check to avoid NULL pointer dereference
  parisc: add missing __init annotation
  net: usb: lan78xx: Fix error message format specifier
  cxgb4: Fix kernel panic while accessing sge_info
  bnx2x: Fix logic to get total no. of PFs per engine
  bnx2x: Do not handle requests from VFs after parity
  bpf: Clear skb->tstamp in bpf_redirect when necessary
  btrfs: Fix error messages in qgroup_rescan_init
  powerpc: Ensure that swiotlb buffer is allocated from low memory
  samples: bpf: fix syscall_tp due to unused syscall
  samples: bpf: Replace symbol compare of trace_event
  ARM: dts: am437x-gp/epos-evm: fix panel compatible
  spi: spi-ti-qspi: Fix a bug when accessing non default CS
  bpf, mips: Limit to 33 tail calls
  bnxt_en: Return error if FW returns more data than dump length
  ARM: dts: bcm283x: Fix critical trip point
  ASoC: topology: Check return value for soc_tplg_pcm_create()
  spi: spi-cavium-thunderx: Add missing pci_release_regions()
  ARM: dts: Cygnus: Fix MDIO node address/size cells
  selftests/ftrace: Fix multiple kprobe testcase
  ARM: dts: BCM5301X: Fix MDIO node address/size cells
  netfilter: nf_tables: validate NFT_DATA_VALUE after nft_data_init()
  netfilter: nf_tables: validate NFT_SET_ELEM_INTERVAL_END
  netfilter: nft_set_rbtree: bogus lookup/get on consecutive elements in named sets
  netfilter: uapi: Avoid undefined left-shift in xt_sctp.h
  ARM: vexpress: Set-up shared OPP table instead of individual for each CPU
  ARM: dts: imx6ul: imx6ul-14x14-evk.dtsi: Fix SPI NOR probing
  efi/gop: Fix memory leak in __gop_query32/64()
  efi/gop: Return EFI_SUCCESS if a usable GOP was found
  efi/gop: Return EFI_NOT_FOUND if there are no usable GOPs
  ASoC: Intel: bytcr_rt5640: Update quirk for Teclast X89
  x86/efi: Update e820 with reserved EFI boot services data to fix kexec breakage
  libtraceevent: Fix lib installation with O=
  mwifiex: Fix heap overflow in mmwifiex_process_tdls_action_frame()
  netfilter: ctnetlink: netns exit must wait for callbacks
  locking/spinlock/debug: Fix various data races
  ASoC: max98090: fix possible race conditions
  regulator: fix use after free issue
  bpf: Fix passing modified ctx to ld/abs/ind instruction
  USB: dummy-hcd: increase max number of devices to 32
  USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein

Conflicts:
	kernel/locking/spinlock_debug.c

Commit 1683149 ("usb: dwc3: gadget: Fix logical condition") was reverted
when android-4.19-q.90 was imported into msm-4.19 because it prevented
the adbd userspace daemon from ever receiving packets from the kernel
USB driver. android-4.19-q.95 introduces a new change, ceaeb21b
("usb: dwc3: gadget: Fix request complete check"), that fixes the
regression, so it is now safe to restore the reverted change. That is
done in this merge.
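
For context, the fix in ceaeb21b (visible in the drivers/usb/dwc3/gadget.c
hunk further below) makes the completion check treat OUT requests as
complete even when the host sends less data than was queued, which is what
resolves the adbd regression. A simplified sketch of the updated helper,
based on that hunk rather than the verbatim upstream patch:

static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
	/*
	 * For the OUT direction the host may legitimately send less than
	 * the queued length, so treat every completed OUT request as done.
	 */
	if (!req->direction)
		return true;

	/* IN requests are complete only when fully transferred. */
	return req->request.actual == req->request.length;
}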

Change-Id: I5f839319ad7746728a98c01f8b5c3dab9c9f368a
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Author: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Date: 2020-02-03 21:43:26 -08:00
Commit: 9459964334
95 changed files with 608 additions and 272 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 94
SUBLEVEL = 95
EXTRAVERSION =
NAME = "People's Front"


@ -83,7 +83,7 @@
};
lcd0: display {
compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
backlight = <&lcd_bl>;


@ -45,7 +45,7 @@
};
lcd0: display {
compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
backlight = <&lcd_bl>;


@ -169,8 +169,8 @@
mdio: mdio@18002000 {
compatible = "brcm,iproc-mdio";
reg = <0x18002000 0x8>;
#size-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
#address-cells = <1>;
status = "disabled";
gphy0: ethernet-phy@0 {


@ -39,7 +39,7 @@
trips {
cpu-crit {
temperature = <80000>;
temperature = <90000>;
hysteresis = <0>;
type = "critical";
};


@ -350,8 +350,8 @@
mdio: mdio@18003000 {
compatible = "brcm,iproc-mdio";
reg = <0x18003000 0x8>;
#size-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
#address-cells = <1>;
};
mdio-bus-mux {


@ -175,7 +175,7 @@
flash0: n25q256a@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,n25q256a";
compatible = "micron,n25q256a", "jedec,spi-nor";
spi-max-frequency = <29000000>;
reg = <0>;
};


@ -87,6 +87,8 @@
"pll1_sys";
arm-supply = <&reg_arm>;
soc-supply = <&reg_soc>;
nvmem-cells = <&cpu_speed_grade>;
nvmem-cell-names = "speed_grade";
};
};
@ -930,6 +932,10 @@
tempmon_temp_grade: temp-grade@20 {
reg = <0x20 4>;
};
cpu_speed_grade: speed-grade@10 {
reg = <0x10 4>;
};
};
lcdif: lcdif@21c8000 {


@ -551,8 +551,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
static int __init ve_spc_clk_init(void)
{
int cpu;
int cpu, cluster;
struct clk *clk;
bool init_opp_table[MAX_CLUSTERS] = { false };
if (!info)
return 0; /* Continue only if SPC is initialised */
@ -578,8 +579,17 @@ static int __init ve_spc_clk_init(void)
continue;
}
cluster = topology_physical_package_id(cpu_dev->id);
if (init_opp_table[cluster])
continue;
if (ve_init_opp_table(cpu_dev))
pr_warn("failed to initialise cpu%d opp table\n", cpu);
else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
topology_core_cpumask(cpu_dev->id)))
pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
else
init_opp_table[cluster] = true;
}
platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);


@ -586,6 +586,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
int off, b_off;
int tcc_reg;
ctx->flags |= EBPF_SEEN_TC;
/*
@ -598,14 +599,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
b_off = b_imm(this_idx + 1, ctx);
emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
/*
* if (--TCC < 0)
* if (TCC-- < 0)
* goto out;
*/
/* Delay slot */
emit_instr(ctx, daddiu, MIPS_R_T5,
(ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
b_off = b_imm(this_idx + 1, ctx);
emit_instr(ctx, bltz, MIPS_R_T5, b_off);
emit_instr(ctx, bltz, tcc_reg, b_off);
/*
* prog = array->ptrs[index];
* if (prog == NULL)


@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
__ret; \
})
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);


@ -789,7 +789,7 @@ EXPORT_SYMBOL(device_to_hwpath);
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent);
static void walk_lower_bus(struct parisc_device *dev)
static void __init walk_lower_bus(struct parisc_device *dev)
{
unsigned long io_io_low, io_io_high;


@ -19,6 +19,7 @@
*
* (the type definitions are in asm/spinlock_types.h)
*/
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@ -53,10 +54,12 @@
#endif
#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
if (!static_branch_unlikely(&shared_processor))
return false;
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}


@ -344,6 +344,14 @@ void __init mem_init(void)
BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
#ifdef CONFIG_SWIOTLB
/*
* Some platforms (e.g. 85xx) limit DMA-able memory way below
* 4G. We force memblock to bottom-up mode to ensure that the
* memory allocated in swiotlb_init() is DMA-able.
* As it's the last memblock allocation, no need to reset it
* back to to-down.
*/
memblock_set_bottom_up(true);
swiotlb_init(0);
#endif


@ -75,6 +75,9 @@
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
EXPORT_SYMBOL_GPL(shared_processor);
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
@ -761,6 +764,10 @@ static void __init pSeries_setup_arch(void)
if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
if (lppaca_shared_proc(get_lppaca()))
static_branch_enable(&shared_processor);
ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV


@ -13,8 +13,10 @@ $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
LDFLAGS_purgatory.ro += -z nodefaultlib


@ -0,0 +1,3 @@
// SPDX-License-Identifier: GPL-2.0
#define __HAVE_ARCH_MEMCMP /* arch function */
#include "../lib/string.c"


@ -375,7 +375,7 @@ int x86_add_exclusive(unsigned int what)
* LBR and BTS are still mutually exclusive.
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return 0;
goto out;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
mutex_lock(&pmc_reserve_mutex);
@ -387,6 +387,7 @@ int x86_add_exclusive(unsigned int what)
mutex_unlock(&pmc_reserve_mutex);
}
out:
atomic_inc(&active_events);
return 0;
@ -397,11 +398,15 @@ fail_unlock:
void x86_del_exclusive(unsigned int what)
{
atomic_dec(&active_events);
/*
* See the comment in x86_add_exclusive().
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return;
atomic_dec(&x86_pmu.lbr_exclusive[what]);
atomic_dec(&active_events);
}
int x86_setup_perfctr(struct perf_event *event)


@ -259,10 +259,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
/* No need to reserve regions that will never be freed. */
if (md.attribute & EFI_MEMORY_RUNTIME)
return;
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);
@ -292,6 +288,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
early_memunmap(new, new_size);
efi_memmap_install(new_phys, num_entries);
e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
e820__update_table(e820_table);
}
/*


@ -145,7 +145,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return 0;
unmap_rq:
__blk_rq_unmap_user(bio);
blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
return ret;


@ -12,6 +12,7 @@
#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
@ -295,20 +296,32 @@ put_node:
#define OCOTP_CFG3_6ULL_SPEED_792MHZ 0x2
#define OCOTP_CFG3_6ULL_SPEED_900MHZ 0x3
static void imx6ul_opp_check_speed_grading(struct device *dev)
static int imx6ul_opp_check_speed_grading(struct device *dev)
{
struct device_node *np;
void __iomem *base;
u32 val;
int ret = 0;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
if (!np)
return;
if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
if (ret)
return ret;
} else {
struct device_node *np;
void __iomem *base;
base = of_iomap(np, 0);
if (!base) {
dev_err(dev, "failed to map ocotp\n");
goto put_node;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
if (!np)
return -ENOENT;
base = of_iomap(np, 0);
of_node_put(np);
if (!base) {
dev_err(dev, "failed to map ocotp\n");
return -EFAULT;
}
val = readl_relaxed(base + OCOTP_CFG3);
iounmap(base);
}
/*
@ -319,7 +332,6 @@ static void imx6ul_opp_check_speed_grading(struct device *dev)
* 2b'11: 900000000Hz on i.MX6ULL only;
* We need to set the max speed of ARM according to fuse map.
*/
val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
@ -339,9 +351,7 @@ static void imx6ul_opp_check_speed_grading(struct device *dev)
dev_warn(dev, "failed to disable 900MHz OPP\n");
}
iounmap(base);
put_node:
of_node_put(np);
return ret;
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
@ -399,10 +409,18 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
}
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull"))
imx6ul_opp_check_speed_grading(cpu_dev);
else
of_machine_is_compatible("fsl,imx6ull")) {
ret = imx6ul_opp_check_speed_grading(cpu_dev);
if (ret == -EPROBE_DEFER)
return ret;
if (ret) {
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
return ret;
}
} else {
imx6q_opp_check_speed_grading(cpu_dev);
}
/* Because we have added the OPPs here, we must free them */
free_opp = true;


@ -85,30 +85,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
static efi_status_t
__gop_query32(efi_system_table_t *sys_table_arg,
struct efi_graphics_output_protocol_32 *gop32,
struct efi_graphics_output_mode_info **info,
unsigned long *size, u64 *fb_base)
{
struct efi_graphics_output_protocol_mode_32 *mode;
efi_graphics_output_protocol_query_mode query_mode;
efi_status_t status;
unsigned long m;
m = gop32->mode;
mode = (struct efi_graphics_output_protocol_mode_32 *)m;
query_mode = (void *)(unsigned long)gop32->query_mode;
status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
info);
if (status != EFI_SUCCESS)
return status;
*fb_base = mode->frame_buffer_base;
return status;
}
static efi_status_t
setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
efi_guid_t *proto, unsigned long size, void **gop_handle)
@ -121,7 +97,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
efi_status_t status = EFI_NOT_FOUND;
efi_status_t status;
u32 *handles = (u32 *)(unsigned long)gop_handle;
int i;
@ -130,6 +106,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u32);
for (i = 0; i < nr_gops; i++) {
struct efi_graphics_output_protocol_mode_32 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@ -147,9 +124,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
status = __gop_query32(sys_table_arg, gop32, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
mode = (void *)(unsigned long)gop32->mode;
info = (void *)(unsigned long)mode->info;
current_fb_base = mode->frame_buffer_base;
if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@ -177,7 +156,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
goto out;
return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@ -199,32 +178,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
out:
return status;
}
static efi_status_t
__gop_query64(efi_system_table_t *sys_table_arg,
struct efi_graphics_output_protocol_64 *gop64,
struct efi_graphics_output_mode_info **info,
unsigned long *size, u64 *fb_base)
{
struct efi_graphics_output_protocol_mode_64 *mode;
efi_graphics_output_protocol_query_mode query_mode;
efi_status_t status;
unsigned long m;
m = gop64->mode;
mode = (struct efi_graphics_output_protocol_mode_64 *)m;
query_mode = (void *)(unsigned long)gop64->query_mode;
status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
info);
if (status != EFI_SUCCESS)
return status;
*fb_base = mode->frame_buffer_base;
return status;
return EFI_SUCCESS;
}
static efi_status_t
@ -239,7 +194,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
efi_status_t status = EFI_NOT_FOUND;
efi_status_t status;
u64 *handles = (u64 *)(unsigned long)gop_handle;
int i;
@ -248,6 +203,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u64);
for (i = 0; i < nr_gops; i++) {
struct efi_graphics_output_protocol_mode_64 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@ -265,9 +221,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
status = __gop_query64(sys_table_arg, gop64, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
mode = (void *)(unsigned long)gop64->mode;
info = (void *)(unsigned long)mode->info;
current_fb_base = mode->frame_buffer_base;
if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@ -295,7 +253,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
goto out;
return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@ -317,8 +275,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
out:
return status;
return EFI_SUCCESS;
}
/*


@ -1292,6 +1292,7 @@ static int gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);


@ -318,7 +318,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
struct iova *alloc_iova_mem(void)
{
return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);


@ -371,6 +371,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
/* Use the default high priority for management frames sent to
* the CPU.
*/
port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
return mv88e6390_g1_monitor_write(chip, ptr, port);
}


@ -197,6 +197,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
/* Offset 0x1C: Global Control 2 */


@ -1113,7 +1113,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
u32 func_config =
MF_CFG_RD(bp,
func_mf_config[BP_PORT(bp) + 2 * i].
func_mf_config[BP_PATH(bp) + 2 * i].
config);
func_num +=
((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);


@ -9995,10 +9995,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
*/
static void bnx2x_parity_recover(struct bnx2x *bp)
{
bool global = false;
u32 error_recovered, error_unrecovered;
bool is_parity;
bool is_parity, global = false;
#ifdef CONFIG_BNX2X_SRIOV
int vf_idx;
for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
if (vf)
vf->state = VF_LOST;
}
#endif
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {


@ -139,6 +139,7 @@ struct bnx2x_virtf {
#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
#define VF_LOST 4 /* Recovery while VFs are loaded */
bool flr_clnup_stage; /* true during flr cleanup */
bool malicious; /* true if FW indicated so, until FLR */


@ -2114,6 +2114,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
{
int i;
if (vf->state == VF_LOST) {
/* Just ack the FW and return if VFs are lost
* in case of parity error. VFs are supposed to be timedout
* on waiting for PF response.
*/
DP(BNX2X_MSG_IOV,
"VF 0x%x lost, not handling the request\n", vf->abs_vfid);
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
return;
}
/* check if tlv type is known */
if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
/* Lock the per vf op mutex and note the locker's identity.


@ -2778,8 +2778,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
}
}
if (info->dest_buf)
memcpy(info->dest_buf + off, dma_buf, len);
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
memcpy(info->dest_buf + off, dma_buf, len);
} else {
rc = -ENOBUFS;
break;
}
}
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
@ -2833,7 +2840,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
u16 segment_id, u32 *seg_len,
void *buf, u32 offset)
void *buf, u32 buf_len, u32 offset)
{
struct hwrm_dbg_coredump_retrieve_input req = {0};
struct bnxt_hwrm_dbg_dma_info info = {NULL};
@ -2848,8 +2855,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
data_len);
if (buf)
if (buf) {
info.dest_buf = buf + offset;
info.buf_len = buf_len;
info.seg_start = offset;
}
rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
if (!rc)
@ -2939,14 +2949,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
struct coredump_segment_record *seg_record = NULL;
u32 offset = 0, seg_hdr_len, seg_record_len;
struct bnxt_coredump_segment_hdr seg_hdr;
struct bnxt_coredump coredump = {NULL};
time64_t start_time;
u16 start_utc;
int rc = 0, i;
if (buf)
buf_len = *dump_len;
start_time = ktime_get_real_seconds();
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
@ -2979,6 +2992,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
u32 duration = 0, seg_len = 0;
unsigned long start, end;
if (buf && ((offset + seg_hdr_len) >
BNXT_COREDUMP_BUF_LEN(buf_len))) {
rc = -ENOBUFS;
goto err;
}
start = jiffies;
rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
@ -2991,9 +3010,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
/* Write segment data into the buffer */
rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
&seg_len, buf,
&seg_len, buf, buf_len,
offset + seg_hdr_len);
if (rc)
if (rc && rc == -ENOBUFS)
goto err;
else if (rc)
netdev_err(bp->dev,
"Failed to retrieve coredump for seg = %d\n",
seg_record->segment_id);
@ -3023,7 +3044,8 @@ err:
rc);
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);
if (rc == -ENOBUFS)
netdev_err(bp->dev, "Firmware returned large coredump buffer");
return rc;
}


@ -31,6 +31,8 @@ struct bnxt_coredump {
u16 total_segs;
};
#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
struct bnxt_hwrm_dbg_dma_info {
void *dest_buf;
int dest_buf_size;
@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info {
u16 seq_off;
u16 data_len_off;
u16 segs;
u32 seg_start;
u32 buf_len;
};
struct hwrm_dbg_cmn_input {


@ -3035,6 +3035,9 @@ static int sge_queue_entries(const struct adapter *adap)
int tot_uld_entries = 0;
int i;
if (!is_uld(adap))
goto lld_only;
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_TX_MAX; i++)
tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
@ -3045,6 +3048,7 @@ static int sge_queue_entries(const struct adapter *adap)
}
mutex_unlock(&uld_mutex);
lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;


@ -650,6 +650,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
return 0;
if (!p->child_handle) {
/* This is an invisible FIFO replacing the original Qdisc.
* Ignore it--the original Qdisc's destroy will follow.
*/
return 0;
}
/* See if the grafted qdisc is already offloaded on any tclass. If so,
* unoffload it.
*/


@ -946,6 +946,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* default */
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
break;
case PHY_INTERFACE_MODE_RMII:


@ -53,7 +53,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
* rate, which then uses the auto-reparenting feature of the
* clock driver, and enabling/disabling the clock.
*/
if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
if (phy_interface_mode_is_rgmii(gmac->interface)) {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
clk_prepare_enable(gmac->tx_clk);
gmac->clk_enabled = 1;


@ -169,6 +169,8 @@
#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
#define XGMAC_RxPBL GENMASK(21, 16)
#define XGMAC_RxPBL_SHIFT 16
#define XGMAC_RBSZ GENMASK(14, 1)
#define XGMAC_RBSZ_SHIFT 1
#define XGMAC_RXST BIT(0)
#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x)))


@ -379,7 +379,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
value |= bfsize << 1;
value &= ~XGMAC_RBSZ;
value |= bfsize << XGMAC_RBSZ_SHIFT;
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}


@ -54,7 +54,7 @@
#include "dwxgmac2.h"
#include "hwif.h"
#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
@ -2997,6 +2997,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
@ -3210,6 +3211,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
@ -3604,12 +3606,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
txfifosz /= priv->plat->tx_queues_to_use;
if (netif_running(dev)) {
netdev_err(priv->dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
new_mtu = STMMAC_ALIGN(new_mtu);
/* If condition true, FIFO is too small or MTU too large */
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
dev->mtu = new_mtu;
netdev_update_features(dev);


@ -818,7 +818,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
lock_sock(sock->sk);
if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_sock;
goto out_rel_sock;
}
sk = sock->sk;
@ -831,8 +831,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
out_sock:
out_rel_sock:
release_sock(sock->sk);
out_sock:
sockfd_put(sock);
return sk;
}


@ -181,7 +181,6 @@ struct rndis_device {
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
u16 rx_table[ITAB_NUM];
};
@ -933,6 +932,8 @@ struct net_device_context {
u32 tx_table[VRSS_SEND_TAB_SIZE];
u16 rx_table[ITAB_NUM];
/* Ethtool settings */
u8 duplex;
u32 speed;


@ -1688,7 +1688,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
indir[i] = rndis_dev->rx_table[i];
indir[i] = ndc->rx_table[i];
}
if (key)
@ -1718,7 +1718,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
return -EINVAL;
for (i = 0; i < ITAB_NUM; i++)
rndis_dev->rx_table[i] = indir[i];
ndc->rx_table[i] = indir[i];
}
if (!key) {


@ -719,6 +719,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
struct net_device_context *ndc = netdev_priv(ndev);
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
@ -758,7 +759,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
itab[i] = rdev->rx_table[i];
itab[i] = ndc->rx_table[i];
/* Set hask key values */
keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
@ -1244,6 +1245,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info)
{
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *ndc = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct ndis_recv_scale_cap rsscap;
@ -1330,9 +1332,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
/* We will use the given number of channels if available. */
net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
for (i = 0; i < ITAB_NUM; i++)
rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
if (!netif_is_rxfh_configured(net)) {
for (i = 0; i < ITAB_NUM; i++)
ndc->rx_table[i] = ethtool_rxfh_indir_default(
i, net_device->num_chn);
}
atomic_set(&net_device->open_chn, 1);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);


@ -263,7 +263,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct net_device *src,
enum macvlan_mode mode)
{
const struct ethhdr *eth = eth_hdr(skb);
const struct ethhdr *eth = skb_eth_hdr(skb);
const struct macvlan_dev *vlan;
struct sk_buff *nskb;
unsigned int i;


@ -510,7 +510,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
}
} else {
netdev_warn(dev->net,
"Failed to read stat ret = 0x%x", ret);
"Failed to read stat ret = %d", ret);
}
kfree(stats);
@ -2721,11 +2721,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
static int lan78xx_linearize(struct sk_buff *skb)
{
return skb_linearize(skb);
}
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
@ -2736,8 +2731,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
return NULL;
}
if (lan78xx_linearize(skb) < 0)
if (skb_linearize(skb)) {
dev_kfree_skb_any(skb);
return NULL;
}
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;


@ -2217,7 +2217,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = &rt->dst;
skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
@ -2254,7 +2254,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),


@ -956,59 +956,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
switch (*pos) {
case WLAN_EID_SUPP_RATES:
if (pos[1] > 32)
return;
sta_ptr->tdls_cap.rates_len = pos[1];
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
if (pos[1] > 32)
return;
basic = sta_ptr->tdls_cap.rates_len;
if (pos[1] > 32 - basic)
return;
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
sta_ptr->tdls_cap.rates_len += pos[1];
break;
case WLAN_EID_HT_CAPABILITY:
memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
return;
if (pos[1] != sizeof(struct ieee80211_ht_cap))
return;
/* copy the ie's value into ht_capb*/
memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
sizeof(struct ieee80211_ht_cap));
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
if (pos > end -
sizeof(struct ieee80211_ht_operation) - 2)
return;
if (pos[1] != sizeof(struct ieee80211_ht_operation))
return;
/* copy the ie's value into ht_oper*/
memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
if (pos > end - 3)
return;
if (pos[1] != 1)
return;
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
if (pos > end - sizeof(struct ieee_types_header))
return;
if (pos[1] < sizeof(struct ieee_types_header))
return;
if (pos[1] > 8)
return;
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], 8));
break;
case WLAN_EID_RSN:
if (pos > end - sizeof(struct ieee_types_header))
return;
if (pos[1] < sizeof(struct ieee_types_header))
return;
if (pos[1] > IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header))
return;
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
if (pos > end - 3)
return;
if (pos[1] != 1)
return;
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
if (priv->adapter->is_hw_11ac_capable)
memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
if (priv->adapter->is_hw_11ac_capable) {
if (pos > end -
sizeof(struct ieee80211_vht_operation) - 2)
return;
if (pos[1] !=
sizeof(struct ieee80211_vht_operation))
return;
/* copy the ie's value into vhtoper*/
memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
sizeof(struct ieee80211_vht_operation));
}
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
if (pos > end -
sizeof(struct ieee80211_vht_cap) - 2)
return;
if (pos[1] != sizeof(struct ieee80211_vht_cap))
return;
/* copy the ie's value into vhtcap*/
memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
sizeof(struct ieee80211_vht_cap));
sta_ptr->is_11ac_enabled = 1;
}
break;
case WLAN_EID_AID:
if (priv->adapter->is_hw_11ac_capable)
if (priv->adapter->is_hw_11ac_capable) {
if (pos > end - 4)
return;
if (pos[1] != 2)
return;
sta_ptr->tdls_cap.aid =
get_unaligned_le16((pos + 2));
}
break;
default:
break;
}


@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@ -633,7 +633,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
u32 reg;
s.global = ioread32(&stdev->mmio_sw_event->global_summary);
s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
s.part_bitmap = readq(&stdev->mmio_sw_event->part_event_bitmap);
s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {


@ -1742,8 +1742,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
regulator = create_regulator(rdev, dev, id);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
put_device(&rdev->dev);
module_put(rdev->owner);
put_device(&rdev->dev);
return regulator;
}
@ -1869,13 +1869,13 @@ static void _regulator_put(struct regulator *regulator)
rdev->open_count--;
rdev->exclusive = 0;
put_device(&rdev->dev);
regulator_unlock(rdev);
kfree_const(regulator->supply_name);
kfree(regulator);
module_put(rdev->owner);
put_device(&rdev->dev);
}
/**


@ -154,6 +154,7 @@ static struct platform_driver rn5t618_regulator_driver = {
module_platform_driver(rn5t618_regulator_driver);
MODULE_ALIAS("platform:rn5t618-regulator");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 regulator driver");
MODULE_LICENSE("GPL v2");


@ -1135,7 +1135,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int fcx_in_css, fcx_in_gneq, fcx_in_features;
int tpm, mdc;
unsigned int mdc;
int tpm;
if (dasd_nofcx)
return 0;
@ -1149,7 +1150,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
return 0;
mdc = ccw_device_get_mdc(device->cdev, 0);
if (mdc < 0) {
if (mdc == 0) {
dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
return 0;
} else {
@ -1160,12 +1161,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
int mdc;
unsigned int mdc;
u32 fcx_max_data;
if (private->fcx_max_data) {
mdc = ccw_device_get_mdc(device->cdev, lpm);
if ((mdc < 0)) {
if (mdc == 0) {
dev_warn(&device->cdev->dev,
"Detecting the maximum data size for zHPF "
"requests failed (rc=%d) for a new path %x\n",
@ -1769,7 +1770,7 @@ out_err2:
dasd_free_block(device->block);
device->block = NULL;
out_err1:
kfree(private->conf_data);
dasd_eckd_clear_conf_data(device);
kfree(device->private);
device->private = NULL;
return rc;
@ -1778,7 +1779,6 @@ out_err1:
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int i;
if (!private)
return;
@ -1788,21 +1788,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->sneq = NULL;
private->vdsneq = NULL;
private->gneq = NULL;
private->conf_len = 0;
for (i = 0; i < 8; i++) {
kfree(device->path[i].conf_data);
if ((__u8 *)device->path[i].conf_data ==
private->conf_data) {
private->conf_data = NULL;
private->conf_len = 0;
}
device->path[i].conf_data = NULL;
device->path[i].cssid = 0;
device->path[i].ssid = 0;
device->path[i].chpid = 0;
}
kfree(private->conf_data);
private->conf_data = NULL;
dasd_eckd_clear_conf_data(device);
}
static struct dasd_ccw_req *


@ -624,7 +624,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
* @mask: mask of paths to use
*
* Return the number of 64K-bytes blocks all paths at least support
* for a transport command. Return values <= 0 indicate failures.
* for a transport command. Return value 0 indicates failure.
*/
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{


@ -81,6 +81,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
error:
clk_disable_unprepare(p->clk);
pci_release_regions(pdev);
spi_master_put(master);
return ret;
}
@ -95,6 +96,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
return;
clk_disable_unprepare(p->clk);
pci_release_regions(pdev);
/* Put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}


@ -69,6 +69,7 @@ struct ti_qspi {
u32 dc;
bool mmap_enabled;
int current_cs;
};
#define QSPI_PID (0x0)
@ -494,6 +495,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
MEM_CS_EN(spi->chip_select));
}
qspi->mmap_enabled = true;
qspi->current_cs = spi->chip_select;
}
static void ti_qspi_disable_memory_map(struct spi_device *spi)
@ -505,6 +507,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
MEM_CS_MASK, 0);
qspi->mmap_enabled = false;
qspi->current_cs = -1;
}
static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
@ -550,7 +553,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
mutex_lock(&qspi->list_lock);
if (!qspi->mmap_enabled)
if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select)
ti_qspi_enable_memory_map(mem->spi);
ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes);
@ -807,6 +810,7 @@ no_dma:
}
}
qspi->mmap_enabled = false;
qspi->current_cs = -1;
ret = devm_spi_register_master(&pdev->dev, master);
if (!ret)


@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_INT] = 1024,
};
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
struct usb_endpoint_descriptor *e2)
{
if (e1->bEndpointAddress == e2->bEndpointAddress)
return true;
if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
return true;
}
return false;
}
/*
* Check for duplicate endpoint addresses in other interfaces and in the
* altsetting currently being parsed.
*/
static bool config_endpoint_is_duplicate(struct usb_host_config *config,
int inum, int asnum, struct usb_endpoint_descriptor *d)
{
struct usb_endpoint_descriptor *epd;
struct usb_interface_cache *intfc;
struct usb_host_interface *alt;
int i, j, k;
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intfc = config->intf_cache[i];
for (j = 0; j < intfc->num_altsetting; ++j) {
alt = &intfc->altsetting[j];
if (alt->desc.bInterfaceNumber == inum &&
alt->desc.bAlternateSetting != asnum)
continue;
for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
epd = &alt->endpoint[k].desc;
if (endpoint_is_duplicate(epd, d))
return true;
}
}
}
return false;
}
static int usb_parse_endpoint(struct device *ddev, int cfgno,
struct usb_host_config *config, int inum, int asnum,
struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
if (ifp->endpoint[i].desc.bEndpointAddress ==
d->bEndpointAddress) {
dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@ -522,8 +568,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
if (((struct usb_descriptor_header *) buffer)->bDescriptorType
== USB_DT_INTERFACE)
break;
retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
num_ep, buffer, size);
retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
alt, num_ep, buffer, size);
if (retval < 0)
return retval;
++n;


@ -2668,7 +2668,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme)
#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme))
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10


@ -2875,6 +2875,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
/*
* For OUT direction, host may send less than the setup
* length. Return true for all OUT requests.
*/
if (!req->direction)
return true;
return req->request.actual == req->request.length;
}
@ -2910,7 +2917,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
if (!dwc3_gadget_ep_request_completed(req) &&
if (!dwc3_gadget_ep_request_completed(req) ||
req->num_pending_sgs) {
__dwc3_gadget_kick_transfer(dep);
goto out;


@ -1335,7 +1335,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
u32 this_sg;
bool next_sg;
to_host = usb_pipein(urb->pipe);
to_host = usb_urb_dir_in(urb);
rbuf = req->req.buf + req->req.actual;
if (!urb->num_sgs) {
@ -1423,7 +1423,7 @@ top:
/* FIXME update emulated data toggle too */
to_host = usb_pipein(urb->pipe);
to_host = usb_urb_dir_in(urb);
if (unlikely(len == 0))
is_short = 1;
else {
@ -1844,7 +1844,7 @@ restart:
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
if (usb_pipein(urb->pipe))
if (usb_urb_dir_in(urb))
address |= USB_DIR_IN;
ep = find_endpoint(dum, address);
if (!ep) {
@ -2399,7 +2399,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
s = "?";
break;
} s; }),
ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype(urb->pipe)) { \
case PIPE_CONTROL: \
@ -2739,7 +2739,7 @@ static struct platform_driver dummy_hcd_driver = {
};
/*-------------------------------------------------------------------------*/
#define MAX_NUM_UDC 2
#define MAX_NUM_UDC 32
static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];


@ -1172,6 +1172,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),


@ -2862,12 +2862,12 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
"qgroup rescan init failed, qgroup rescan is not queued");
ret = -EINVAL;
} else if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_ON)) {
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup rescan is not queued");
"qgroup rescan init failed, qgroup is not enabled");
ret = -EINVAL;
}


@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
spin_unlock(&inode->i_lock);
spin_unlock(&sb->s_inode_list_lock);
cond_resched();
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);


@ -660,6 +660,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
struct inode *inode, *next;
LIST_HEAD(dispose);
again:
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
@ -682,6 +683,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose);
if (need_resched()) {
spin_unlock(&sb->s_inode_list_lock);
cond_resched();
dispose_list(&dispose);
goto again;
}
}
spin_unlock(&sb->s_inode_list_lock);


@ -90,6 +90,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
iput_inode = inode;
cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);


@ -980,6 +980,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
* later.
*/
old_inode = inode;
cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);


@ -28,6 +28,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
/* Prefer this version in TX path, instead of
* skb_reset_mac_header() + eth_hdr()
*/
static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb->data;
}
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);


@ -41,19 +41,19 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] |= \
1 << (type % bytes(__u32)); \
1u << (type % bytes(__u32)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] &= \
~(1 << (type % bytes(__u32))); \
~(1u << (type % bytes(__u32))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
((chunkmap)[type / bytes (__u32)] & \
(1 << (type % bytes (__u32)))) ? 1: 0; \
(1u << (type % bytes (__u32)))) ? 1: 0; \
})
#define SCTP_CHUNKMAP_RESET(chunkmap) \


@ -4272,6 +4272,7 @@ static bool may_access_skb(enum bpf_prog_type type)
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
static const int ctx_reg = BPF_REG_6;
u8 mode = BPF_MODE(insn->code);
int i, err;
@ -4305,11 +4306,11 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
/* check whether implicit source operand (register R6) is readable */
err = check_reg_arg(env, BPF_REG_6, SRC_OP);
err = check_reg_arg(env, ctx_reg, SRC_OP);
if (err)
return err;
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
if (regs[ctx_reg].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
@ -4322,6 +4323,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}
err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
if (err < 0)
return err;
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);


@ -53,19 +53,19 @@ EXPORT_SYMBOL(__rwlock_init);
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
struct task_struct *owner = READ_ONCE(lock->owner);
if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
owner = lock->owner;
if (owner == SPINLOCK_OWNER_INIT)
owner = NULL;
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
lock, lock->magic,
lock, READ_ONCE(lock->magic),
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
lock->owner_cpu);
READ_ONCE(lock->owner_cpu));
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
@ -87,16 +87,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
lock, "cpu recursion");
}
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
WRITE_ONCE(lock->owner, current);
}
static inline void debug_spin_unlock(raw_spinlock_t *lock)
@ -106,8 +106,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
WRITE_ONCE(lock->owner_cpu, -1);
}
/*
@ -195,8 +195,8 @@ static inline void debug_write_lock_before(rwlock_t *lock)
static inline void debug_write_lock_after(rwlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
WRITE_ONCE(lock->owner, current);
}
static inline void debug_write_unlock(rwlock_t *lock)
@ -205,8 +205,8 @@ static inline void debug_write_unlock(rwlock_t *lock)
RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
WRITE_ONCE(lock->owner_cpu, -1);
}
void do_raw_write_lock(rwlock_t *lock)


@ -114,6 +114,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);


@ -612,7 +612,8 @@ static int vlan_dev_init(struct net_device *dev)
return 0;
}
static void vlan_dev_uninit(struct net_device *dev)
/* Note: this function might be called multiple times for the same device. */
void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);


@ -110,11 +110,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
int rem;
int rem, err;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
vlan_dev_change_flags(dev, flags->flags, flags->mask);
err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
if (err)
return err;
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
@@ -125,7 +127,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
-vlan_dev_set_egress_priority(dev, m->from, m->to);
+err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+if (err)
+return err;
}
}
return 0;
@@ -181,10 +185,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
err = vlan_changelink(dev, tb, data, extack);
-if (err < 0)
-return err;
-return register_vlan_dev(dev, extack);
+if (!err)
+err = register_vlan_dev(dev, extack);
+if (err)
+vlan_dev_uninit(dev);
+return err;
}
static inline size_t vlan_qos_map_size(unsigned int n)


@@ -2007,6 +2007,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
}
skb->dev = dev;
+skb->tstamp = 0;
__this_cpu_inc(xmit_recursion);
ret = dev_queue_xmit(skb);


@@ -1716,8 +1716,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
}
/* Ignore very old stuff early */
-if (!after(sp[used_sacks].end_seq, prior_snd_una))
+if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
+if (i == 0)
+first_sack_index = -1;
continue;
+}
used_sacks++;
}


@@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
return LLC_PDU_IS_CMD(pdu) && /* command PDU */
LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
-!pdu->dsap ? 0 : 1; /* NULL DSAP value */
+!pdu->dsap; /* NULL DSAP value */
}
static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
@@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
return LLC_PDU_IS_CMD(pdu) && /* command PDU */
LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
-!pdu->dsap ? 0 : 1; /* NULL DSAP */
+!pdu->dsap; /* NULL DSAP */
}
static int llc_station_ac_send_xid_r(struct sk_buff *skb)


@@ -3576,6 +3576,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
list_for_each_entry(net, net_exit_list, exit_list)
ctnetlink_net_exit(net);
+/* wait for other cpus until they are done with ctnl_notifiers */
+synchronize_rcu();
}
static struct pernet_operations ctnetlink_net_ops = {


@@ -4117,8 +4117,10 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
err = -EINVAL;
-if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
+if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
+nft_data_release(&elem.key.val, desc.type);
return err;
+}
priv = set->ops->get(ctx->net, set, &elem, flags);
if (IS_ERR(priv))
@@ -4351,14 +4353,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
!(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
-if (nla[NFTA_SET_ELEM_DATA] != NULL &&
-flags & NFT_SET_ELEM_INTERVAL_END)
-return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
}
+if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
+(nla[NFTA_SET_ELEM_DATA] ||
+nla[NFTA_SET_ELEM_OBJREF] ||
+nla[NFTA_SET_ELEM_TIMEOUT] ||
+nla[NFTA_SET_ELEM_EXPIRATION] ||
+nla[NFTA_SET_ELEM_USERDATA] ||
+nla[NFTA_SET_ELEM_EXPR]))
+return -EINVAL;
timeout = 0;
if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))


@@ -83,7 +83,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
tb[NFTA_BITWISE_MASK]);
if (err < 0)
return err;
-if (d1.len != priv->len) {
+if (d1.type != NFT_DATA_VALUE || d1.len != priv->len) {
err = -EINVAL;
goto err1;
}
@@ -92,7 +92,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
tb[NFTA_BITWISE_XOR]);
if (err < 0)
goto err1;
-if (d2.len != priv->len) {
+if (d2.type != NFT_DATA_VALUE || d2.len != priv->len) {
err = -EINVAL;
goto err2;
}


@@ -82,6 +82,12 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
if (err < 0)
return err;
+if (desc.type != NFT_DATA_VALUE) {
+err = -EINVAL;
+nft_data_release(&priv->data, desc.type);
+return err;
+}
priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
err = nft_validate_register_load(priv->sreg, desc.len);
if (err < 0)


@@ -70,11 +70,21 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
if (err < 0)
return err;
+if (desc_from.type != NFT_DATA_VALUE) {
+err = -EINVAL;
+goto err1;
+}
err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to),
&desc_to, tb[NFTA_RANGE_TO_DATA]);
if (err < 0)
goto err1;
+if (desc_to.type != NFT_DATA_VALUE) {
+err = -EINVAL;
+goto err2;
+}
if (desc_from.len != desc_to.len) {
err = -EINVAL;
goto err2;


@@ -77,8 +77,13 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
parent = rcu_dereference_raw(parent->rb_left);
continue;
}
-if (nft_rbtree_interval_end(rbe))
-goto out;
+if (nft_rbtree_interval_end(rbe)) {
+if (nft_set_is_anonymous(set))
+return false;
+parent = rcu_dereference_raw(parent->rb_left);
+interval = NULL;
+continue;
+}
*ext = &rbe->ext;
return true;
@@ -91,7 +96,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
*ext = &interval->ext;
return true;
}
-out:
return false;
}
@@ -139,8 +144,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
} else if (d > 0) {
parent = rcu_dereference_raw(parent->rb_right);
} else {
-if (!nft_set_elem_active(&rbe->ext, genmask))
+if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = rcu_dereference_raw(parent->rb_left);
+continue;
+}
if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
@@ -148,7 +155,11 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
*elem = rbe;
return true;
}
-return false;
+if (nft_rbtree_interval_end(rbe))
+interval = NULL;
+parent = rcu_dereference_raw(parent->rb_left);
}
}


@@ -1009,10 +1009,13 @@ static void rfkill_sync_work(struct work_struct *work)
int __must_check rfkill_register(struct rfkill *rfkill)
{
static unsigned long rfkill_no;
-struct device *dev = &rfkill->dev;
+struct device *dev;
int error;
-BUG_ON(!rfkill);
+if (!rfkill)
+return -EINVAL;
+dev = &rfkill->dev;
mutex_lock(&rfkill_global_mutex);


@@ -1758,7 +1758,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->avg_window_begin));
u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
-do_div(b, window_interval);
+b = div64_u64(b, window_interval);
q->avg_peak_bandwidth =
cake_ewma(q->avg_peak_bandwidth, b,
b > q->avg_peak_bandwidth ? 2 : 8);
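
The sch_cake hunk swaps do_div() for div64_u64() because do_div() only takes a 32-bit divisor, while window_interval is a 64-bit nanosecond value; if the divisor ever grows past 32 bits, its truncated low word can end up as zero, which is presumably the divide-by-zero the patch title refers to. A stand-alone illustration of that truncation (nothing here is scheduler code; the values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t window_interval = 0x100000000ULL;	/* 64-bit value, low 32 bits zero */
	uint64_t scaled_bytes = 1000000000ULL;

	uint32_t truncated = (uint32_t)window_interval;	/* what a 32-bit divisor sees */
	printf("truncated divisor: %u\n", truncated);	/* 0 -> divide-by-zero hazard */

	/* the fixed code divides by the full 64-bit value instead */
	printf("full 64/64 result: %llu\n",
	       (unsigned long long)(scaled_bytes / window_interval));
	return 0;
}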


@@ -735,10 +735,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_FQ_QUANTUM]) {
u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
-if (quantum > 0)
+if (quantum > 0 && quantum <= (1 << 20)) {
q->quantum = quantum;
-else
+} else {
+NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
err = -EINVAL;
+}
}
if (tb[TCA_FQ_INITIAL_QUANTUM])
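
The sch_fq hunk stops accepting arbitrary TCA_FQ_QUANTUM values and caps them at 1 << 20 bytes (1 MiB); quantum is the per-flow byte allowance per scheduling round, so absurdly large values from user space would presumably defeat the fairness and pacing arithmetic. A hedged sketch of the same bounds check outside the kernel; FQ_QUANTUM_MAX and validate_quantum() are illustrative names, not driver API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define FQ_QUANTUM_MAX (1U << 20)	/* the 1 MiB cap added by the patch */

static int validate_quantum(uint32_t quantum)
{
	if (quantum == 0 || quantum > FQ_QUANTUM_MAX)
		return -EINVAL;		/* reject silly values from user space */
	return 0;			/* accepted */
}

int main(void)
{
	printf("%d\n", validate_quantum(3028));			/* ~2 * MTU: accepted (0) */
	printf("%d\n", validate_quantum(UINT32_C(1) << 30));	/* silly: rejected (-EINVAL) */
	return 0;
}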


@@ -314,8 +314,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
bool any_qdisc_is_offloaded;
int err;
-if (new == NULL)
-new = &noop_qdisc;
+if (!new) {
+new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+TC_H_MAKE(sch->handle, arg), extack);
+if (!new)
+new = &noop_qdisc;
+else
+qdisc_hash_add(new, true);
+}
*old = qdisc_replace(sch, new, &q->queues[band]);


@@ -1373,8 +1373,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
/* Generate an INIT ACK chunk. */
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
0);
-if (!new_obj)
-goto nomem;
+if (!new_obj) {
+error = -ENOMEM;
+break;
+}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1396,7 +1398,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
if (!new_obj) {
if (cmd->obj.chunk)
sctp_chunk_free(cmd->obj.chunk);
-goto nomem;
+error = -ENOMEM;
+break;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1443,8 +1446,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
/* Generate a SHUTDOWN chunk. */
new_obj = sctp_make_shutdown(asoc, chunk);
-if (!new_obj)
-goto nomem;
+if (!new_obj) {
+error = -ENOMEM;
+break;
+}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
break;
@@ -1780,11 +1785,17 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
}
-if (error)
+if (error) {
+cmd = sctp_next_cmd(commands);
+while (cmd) {
+if (cmd->verb == SCTP_CMD_REPLY)
+sctp_chunk_free(cmd->obj.chunk);
+cmd = sctp_next_cmd(commands);
+}
break;
+}
}
out:
/* If this is in response to a received chunk, wait until
* we are done with the packet to open the queue so that we don't
* send multiple packets in response to a single request.
@@ -1799,7 +1810,4 @@ out:
sp->data_ready_signalled = 0;
return error;
-nomem:
-error = -ENOMEM;
-goto out;
}


@@ -50,13 +50,27 @@ static __always_inline void count(void *map)
SEC("tracepoint/syscalls/sys_enter_open")
int trace_enter_open(struct syscalls_enter_open_args *ctx)
{
-count((void *)&enter_open_map);
+count(&enter_open_map);
return 0;
}
SEC("tracepoint/syscalls/sys_enter_openat")
int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
{
count(&enter_open_map);
return 0;
}
SEC("tracepoint/syscalls/sys_exit_open")
int trace_enter_exit(struct syscalls_exit_open_args *ctx)
{
-count((void *)&exit_open_map);
+count(&exit_open_map);
return 0;
}
SEC("tracepoint/syscalls/sys_exit_openat")
int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
{
count(&exit_open_map);
return 0;
}


@@ -35,9 +35,9 @@ static void print_ksym(__u64 addr)
return;
sym = ksym_search(addr);
printf("%s;", sym->name);
-if (!strcmp(sym->name, "sys_read"))
+if (!strstr(sym->name, "sys_read"))
sys_read_seen = true;
-else if (!strcmp(sym->name, "sys_write"))
+else if (!strstr(sym->name, "sys_write"))
sys_write_seen = true;
}


@@ -252,6 +252,13 @@ static int expr_eq(struct expr *e1, struct expr *e2)
{
int res, old_count;
+/*
+ * A NULL expr is taken to be yes, but there's also a different way to
+ * represent yes. expr_is_yes() checks for either representation.
+ */
+if (!e1 || !e2)
+return expr_is_yes(e1) && expr_is_yes(e2);
if (e1->type != e2->type)
return 0;
switch (e1->type) {
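
The expr_eq() hunk guards against NULL before touching e1->type: per the added comment, kconfig treats a NULL expression as "y", and expr_is_yes() accepts either spelling, so two NULL/"y" expressions now compare equal instead of crashing. A simplified stand-alone model of that guard; the types and the expr_is_yes() body below are illustrative, not kconfig's real definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum expr_type { E_SYMBOL, E_AND, E_OR };

struct expr {
	enum expr_type type;
	bool is_yes_symbol;		/* stands in for "left.sym is the 'y' symbol" */
};

static bool expr_is_yes(const struct expr *e)
{
	return !e || (e->type == E_SYMBOL && e->is_yes_symbol);
}

static bool expr_eq(const struct expr *e1, const struct expr *e2)
{
	if (!e1 || !e2)				/* never dereference a NULL expr */
		return expr_is_yes(e1) && expr_is_yes(e2);
	return e1->type == e2->type;		/* the real code keeps comparing from here */
}

int main(void)
{
	struct expr yes = { E_SYMBOL, true };

	printf("%d %d\n", expr_eq(NULL, &yes), expr_eq(NULL, NULL));	/* prints: 1 1 */
	return 0;
}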


@@ -2121,10 +2121,8 @@ static void max98090_pll_det_disable_work(struct work_struct *work)
M98090_IULK_MASK, 0);
}
-static void max98090_pll_work(struct work_struct *work)
+static void max98090_pll_work(struct max98090_priv *max98090)
{
-struct max98090_priv *max98090 =
-container_of(work, struct max98090_priv, pll_work);
struct snd_soc_component *component = max98090->component;
if (!snd_soc_component_is_active(component))
@@ -2277,7 +2275,7 @@ static irqreturn_t max98090_interrupt(int irq, void *data)
if (active & M98090_ULK_MASK) {
dev_dbg(component->dev, "M98090_ULK_MASK\n");
-schedule_work(&max98090->pll_work);
+max98090_pll_work(max98090);
}
if (active & M98090_JDET_MASK) {
@@ -2440,7 +2438,6 @@ static int max98090_probe(struct snd_soc_component *component)
max98090_pll_det_enable_work);
INIT_WORK(&max98090->pll_det_disable_work,
max98090_pll_det_disable_work);
-INIT_WORK(&max98090->pll_work, max98090_pll_work);
/* Enable jack detection */
snd_soc_component_write(component, M98090_REG_JACK_DETECT,
@@ -2493,7 +2490,6 @@ static void max98090_remove(struct snd_soc_component *component)
cancel_delayed_work_sync(&max98090->jack_work);
cancel_delayed_work_sync(&max98090->pll_det_enable_work);
cancel_work_sync(&max98090->pll_det_disable_work);
-cancel_work_sync(&max98090->pll_work);
max98090->component = NULL;
}


@@ -1533,7 +1533,6 @@ struct max98090_priv {
struct delayed_work jack_work;
struct delayed_work pll_det_enable_work;
struct work_struct pll_det_disable_work;
-struct work_struct pll_work;
struct snd_soc_jack *jack;
unsigned int dai_fmt;
int tdm_slots;


@@ -2792,7 +2792,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
if (target % Fref == 0) {
fll_div->theta = 0;
-fll_div->lambda = 0;
+fll_div->lambda = 1;
} else {
gcd_fll = gcd(target, fratio * Fref);
@@ -2862,7 +2862,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
return -EINVAL;
}
-if (fll_div.theta || fll_div.lambda)
+if (fll_div.theta)
fll1 |= WM8962_FLL_FRAC;
/* Stop the FLL while we reconfigure */


@@ -677,13 +677,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MCLK_EN),
},
{
/* Teclast X89 */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
},
.driver_data = (void *)(BYT_RT5640_IN3_MAP |
-BYT_RT5640_MCLK_EN |
-BYT_RT5640_SSP0_AIF1),
+BYT_RT5640_JD_SRC_JD1_IN4P |
+BYT_RT5640_OVCD_TH_2000UA |
+BYT_RT5640_OVCD_SF_1P0 |
+BYT_RT5640_SSP0_AIF1 |
+BYT_RT5640_MCLK_EN),
},
{ /* Toshiba Satellite Click Mini L9W-B */
.matches = {


@@ -1890,6 +1890,7 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
int count = hdr->count;
int i;
bool abi_match;
+int ret;
if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
return 0;
@@ -1926,7 +1927,12 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
}
/* create the FE DAIs and DAI links */
-soc_tplg_pcm_create(tplg, _pcm);
+ret = soc_tplg_pcm_create(tplg, _pcm);
+if (ret < 0) {
+if (!abi_match)
+kfree(_pcm);
+return ret;
+}
/* offset by version-specific struct size and
* real priv data size


@@ -115,6 +115,7 @@ EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION)
LIB_INSTALL = libtraceevent.a libtraceevent.so*
+LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL))
INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)


@@ -25,9 +25,9 @@ while read i; do
test $N -eq 256 && break
done
-L=`wc -l kprobe_events`
-if [ $L -ne $N ]; then
-echo "The number of kprobes events ($L) is not $N"
+L=`cat kprobe_events | wc -l`
+if [ $L -ne 256 ]; then
+echo "The number of kprobes events ($L) is not 256"
exit_fail
fi