This is the 4.19.264 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmNj1bgACgkQONu9yGCS
 aT4ABQ/8CCO0lyrHTLSG/hmhXOaLkEq3q4sO5xI2hAAwaIGngivQpjR+qGicUxm2
 MmX9pLihi5uEpayVFM0Gb5+6NRObgUULAetiDk34gSqnWjFQksrS8WZdNzXSIq85
 yn3+1o87Nr+gOTGd1xighmRKw87Kaacqtt80MXBeTQ7SZt7ES7Oxn/I4zw1vjiDz
 2nfUp+w2yWsUoQHOUGITe3+ae8QmTX1dwQhXf/z0EX8VqgGEKD7Sv6aSxZfmb4cL
 srBYw0D3bi5+cSH/auiN+rxkGQ7CZ24xsqaFZN4L5lAhO+TTfanp6XJAMAKFYm5N
 1sjeBfNNDq8LEThqBG1RULlaMOkflW7fXAZuA6oGJTr1+V0MP8h79jniKSJ8kGnN
 xpprlnm7hKy0OazbRIcRBPim3hv5fvy+U7eiWQqZCOdYc93hTflzamXeu+OZNpsB
 flAJbUGDUniApKFhyXhEWr8jCz7oQvf2VzQmKRB6KpbEOgaEJ5S8Ls5pGzt3JqdW
 AOQHC4t4/EcyVvOBUcIiXYtnE3VQ4RuOU6bCU5soqDiWTYk1yeRWKFiFLpkzpMAR
 FjKBLFez2Dc+T09DXcXbJZ7V3t510hhLw9Pai1iXuZlkYuk6jJGZ/VMmi8txxyYt
 wChGDUcnv2W/Ub7hLSk9GkxtaXQ3zdLYVk2GtloCkuqorks8EFs=
 =7RS7
 -----END PGP SIGNATURE-----

Merge 4.19.264 into android-4.19-stable

Changes in 4.19.264
	ocfs2: clear dinode links count in case of error
	ocfs2: fix BUG when iput after ocfs2_mknod fails
	x86/microcode/AMD: Apply the patch early on every logical thread
	hwmon/coretemp: Handle large core ID value
	ata: ahci-imx: Fix MODULE_ALIAS
	ata: ahci: Match EM_MAX_SLOTS with SATA_PMP_MAX_PORTS
	KVM: arm64: vgic: Fix exit condition in scan_its_table()
	media: venus: dec: Handle the case where find_format fails
	arm64: errata: Remove AES hwcap for COMPAT tasks
	r8152: add PID for the Lenovo OneLink+ Dock
	btrfs: fix processing of delayed data refs during backref walking
	btrfs: fix processing of delayed tree block refs during backref walking
	ACPI: extlog: Handle multiple records
	tipc: Fix recognition of trial period
	tipc: fix an information leak in tipc_topsrv_kern_subscr
	HID: magicmouse: Do not set BTN_MOUSE on double report
	net/atm: fix proc_mpc_write incorrect return value
	net: sched: cake: fix null pointer access issue when cake_init() fails
	net: hns: fix possible memory leak in hnae_ae_register()
	iommu/vt-d: Clean up si_domain in the init_dmars() error path
	media: v4l2-mem2mem: Apply DST_QUEUE_OFF_BASE on MMAP buffers across ioctls
	ACPI: video: Force backlight native for more TongFang devices
	Makefile.debug: re-enable debug info for .S files
	hv_netvsc: Fix race between VF offering and VF association message from host
	mm: /proc/pid/smaps_rollup: fix no vma's null-deref
	can: kvaser_usb: Fix possible completions during init_completion
	ALSA: Use del_timer_sync() before freeing timer
	ALSA: au88x0: use explicitly signed char
	USB: add RESET_RESUME quirk for NVIDIA Jetson devices in RCM
	usb: dwc3: gadget: Stop processing more requests on IMI
	usb: dwc3: gadget: Don't set IMI for no_interrupt
	usb: bdc: change state when port disconnected
	usb: xhci: add XHCI_SPURIOUS_SUCCESS to ASM1042 despite being a V0.96 controller
	xhci: Remove device endpoints from bandwidth list when freeing the device
	tools: iio: iio_utils: fix digit calculation
	iio: light: tsl2583: Fix module unloading
	fbdev: smscufx: Fix several use-after-free bugs
	mac802154: Fix LQI recording
	drm/msm/dsi: fix memory corruption with too many bridges
	drm/msm/hdmi: fix memory corruption with too many bridges
	mmc: core: Fix kernel panic when remove non-standard SDIO card
	kernfs: fix use-after-free in __kernfs_remove
	perf auxtrace: Fix address filter symbol name match for modules
	s390/futex: add missing EX_TABLE entry to __futex_atomic_op()
	Xen/gntdev: don't ignore kernel unmapping error
	xen/gntdev: Prevent leaking grants
	mm,hugetlb: take hugetlb_lock before decrementing h->resv_huge_pages
	net: ieee802154: fix error return code in dgram_bind()
	drm/msm: Fix return type of mdp4_lvds_connector_mode_valid
	arc: iounmap() arg is volatile
	ALSA: ac97: fix possible memory leak in snd_ac97_dev_register()
	tipc: fix a null-ptr-deref in tipc_topsrv_accept
	net: netsec: fix error handling in netsec_register_mdio()
	x86/unwind/orc: Fix unreliable stack dump with gcov
	amd-xgbe: fix the SFP compliance codes check for DAC cables
	amd-xgbe: add the bit rate quirk for Molex cables
	kcm: annotate data-races around kcm->rx_psock
	kcm: annotate data-races around kcm->rx_wait
	net: fix UAF issue in nfqnl_nf_hook_drop() when ops_init() failed
	net: lantiq_etop: don't free skb when returning NETDEV_TX_BUSY
	tcp: fix indefinite deferral of RTO with SACK reneging
	can: mscan: mpc5xxx: mpc5xxx_can_probe(): add missing put_clock() in error path
	PM: hibernate: Allow hybrid sleep to work with s2idle
	media: vivid: s_fbuf: add more sanity checks
	media: vivid: dev->bitmap_cap wasn't freed in all cases
	media: v4l2-dv-timings: add sanity checks for blanking values
	media: videodev2.h: V4L2_DV_BT_BLANKING_HEIGHT should check 'interlaced'
	i40e: Fix ethtool rx-flow-hash setting for X722
	i40e: Fix VF hang when reset is triggered on another VF
	i40e: Fix flow-type by setting GL_HASH_INSET registers
	net: ksz884x: fix missing pci_disable_device() on error in pcidev_init()
	PM: domains: Fix handling of unavailable/disabled idle states
	ALSA: aoa: i2sbus: fix possible memory leak in i2sbus_add_dev()
	ALSA: aoa: Fix I2S device accounting
	openvswitch: switch from WARN to pr_warn
	net: ehea: fix possible memory leak in ehea_register_port()
	net/mlx5e: Do not increment ESN when updating IPsec ESN state
	can: rcar_canfd: rcar_canfd_handle_global_receive(): fix IRQ storm on global FIFO receive
	Linux 4.19.264

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I283d9b8453e673ca7e260e30eb1a2d05269096c8
Committed by Greg Kroah-Hartman on 2022-11-07 09:38:03 +01:00 as commit e543b3322e.
82 changed files with 718 additions and 247 deletions

@ -55,7 +55,9 @@ stable kernels.
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
| ARM | Cortex-A57 | #852523 | N/A |
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 |
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 |
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 263
SUBLEVEL = 264
EXTRAVERSION =
NAME = "People's Front"
@ -774,7 +774,9 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
else
KBUILD_CFLAGS += -g
endif
ifneq ($(LLVM_IAS),1)
ifeq ($(LLVM_IAS),1)
KBUILD_AFLAGS += -g
else
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif
endif

@ -35,7 +35,7 @@ static inline void ioport_unmap(void __iomem *addr)
{
}
extern void iounmap(const void __iomem *addr);
extern void iounmap(const volatile void __iomem *addr);
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc(phy, sz) ioremap(phy, sz)

@ -95,7 +95,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
EXPORT_SYMBOL(ioremap_prot);
void iounmap(const void __iomem *addr)
void iounmap(const volatile void __iomem *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
if (arc_uncached_addr_space((phys_addr_t)(u32)addr))

@ -523,6 +523,22 @@ config ARM64_ERRATUM_1542419
If unsure, say Y.
config ARM64_ERRATUM_1742098
bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
depends on COMPAT
default y
help
This option removes the AES hwcap for aarch32 user-space to
workaround erratum 1742098 on Cortex-A57 and Cortex-A72.
Affected parts may corrupt the AES state if an interrupt is
taken between a pair of AES instructions. These instructions
are only present if the cryptography extensions are present.
All software should have a fallback implementation for CPUs
that don't implement the cryptography extensions.
If unsure, say Y.
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y

@ -55,8 +55,9 @@
#define ARM64_SSBS 34
#define ARM64_WORKAROUND_1542419 35
#define ARM64_SPECTRE_BHB 36
#define ARM64_WORKAROUND_1742098 37
/* kabi: reserve 37 - 62 for future cpu capabilities */
/* kabi: reserve 38 - 62 for future cpu capabilities */
#define ARM64_NCAPS 62
#endif /* __ASM_CPUCAPS_H */

@ -683,6 +683,15 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
@ -883,6 +892,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_neoverse_n1_erratum_1542419,
.cpu_enable = cpu_enable_trap_ctr_access,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
{
.desc = "ARM erratum 1742098",
.capability = ARM64_WORKAROUND_1742098,
CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
},
#endif
{
}

@ -31,6 +31,7 @@
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
@ -1154,6 +1155,14 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_SSBD */
static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
#endif /* ARM64_ERRATUM_1742098 */
}
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@ -1802,8 +1811,10 @@ void __init setup_cpu_features(void)
mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())
if (system_supports_32bit_el0()) {
setup_elf_hwcaps(compat_elf_hwcaps);
elf_hwcap_fixup();
}
if (system_uses_ttbr0_pan())
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

@ -16,7 +16,8 @@
"3: jl 1b\n" \
" lhi %0,0\n" \
"4: sacf 768\n" \
EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
EX_TABLE(0b,4b) EX_TABLE(1b,4b) \
EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \

View File

@ -222,7 +222,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
return ret;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev >= mc->hdr.patch_id)
/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (rev > mc->hdr.patch_id)
return ret;
if (!__apply_microcode_amd(mc)) {
@ -304,8 +310,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
/* Check whether we have saved a new patch already: */
if (*new_rev && rev < mc->hdr.patch_id) {
/*
* Check whether a new patch has been saved already. Also, allow application of
* the same revision in order to pick up SMT-thread-specific configuration even
* if the sibling SMT thread already has an up-to-date revision.
*/
if (*new_rev && rev <= mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
*new_rev = mc->hdr.patch_id;
return;

@ -668,7 +668,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
/* Otherwise, skip ahead to the user-specified starting frame: */
while (!unwind_done(state) &&
(!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
state->sp < (unsigned long)first_frame))
state->sp <= (unsigned long)first_frame))
unwind_next_frame(state);
return;

@ -13,6 +13,7 @@
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
@ -141,8 +142,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
const guid_t *fru_id = &guid_null;
char *fru_text = "";
const guid_t *fru_id;
char *fru_text;
guid_t *sec_type;
static u32 err_seq;
@ -163,17 +164,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
/* log event via trace */
err_seq++;
gdata = (struct acpi_hest_generic_data *)(tmp + 1);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
apei_estatus_for_each_section(tmp, gdata) {
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
else
fru_id = &guid_null;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
else
fru_text = "";
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
}
}
out:

@ -447,6 +447,70 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
},
},
/*
* More Tongfang devices with the same issue as the Clevo NL5xRU and
* NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above.
*/
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxNGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxZGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxRGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
},
},
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.

@ -265,7 +265,7 @@ enum {
PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
/* em constants */
EM_MAX_SLOTS = 8,
EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
EM_MAX_RETRY = 5,
/* em_ctl bits */

@ -1250,4 +1250,4 @@ module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ahci:imx");
MODULE_ALIAS("platform:" DRV_NAME);

@ -2459,6 +2459,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
np = it.node;
if (!of_match_node(idle_state_match, np))
continue;
if (!of_device_is_available(np))
continue;
if (states) {
ret = genpd_parse_state(&states[i], np);
if (ret) {

@ -74,8 +74,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
return ret;
}
static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
static enum drm_mode_status
mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);

@ -200,6 +200,12 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return -EINVAL;
priv = dev->dev_private;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);

@ -299,6 +299,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct platform_device *pdev = hdmi->pdev;
int ret;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
hdmi->dev = dev;
hdmi->encoder = encoder;

@ -343,7 +343,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_raw_event(hdev, report, data + 2, data[1]);
magicmouse_raw_event(hdev, report, data + 2 + data[1],
size - 2 - data[1]);
break;
return 0;
default:
return 0;
}

@ -59,9 +59,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
for_each_cpu(i, topology_sibling_cpumask(cpu))
@ -104,6 +101,8 @@ struct temp_data {
struct platform_data {
struct device *hwmon_dev;
u16 pkg_id;
u16 cpu_map[NUM_REAL_CORES];
struct ida ida;
struct cpumask cpumask;
struct temp_data *core_data[MAX_CORE_DATA];
struct device_attribute name_attr;
@ -454,7 +453,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
MSR_IA32_THERM_STATUS;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = TO_CORE_ID(cpu);
tdata->cpu_core_id = topology_core_id(cpu);
tdata->attr_size = MAX_CORE_ATTRS;
mutex_init(&tdata->update_lock);
return tdata;
@ -467,7 +466,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
int err, attr_no;
int err, index, attr_no;
/*
* Find attr number for sysfs:
@ -475,14 +474,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
* The attr number is always core id + 2
* The Pkgtemp will always show up as temp1_*, if available
*/
attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
if (pkg_flag) {
attr_no = PKG_SYSFS_ATTR_NO;
} else {
index = ida_alloc(&pdata->ida, GFP_KERNEL);
if (index < 0)
return index;
pdata->cpu_map[index] = topology_core_id(cpu);
attr_no = index + BASE_SYSFS_ATTR_NO;
}
if (attr_no > MAX_CORE_DATA - 1)
return -ERANGE;
if (attr_no > MAX_CORE_DATA - 1) {
err = -ERANGE;
goto ida_free;
}
tdata = init_temp_data(cpu, pkg_flag);
if (!tdata)
return -ENOMEM;
if (!tdata) {
err = -ENOMEM;
goto ida_free;
}
/* Test if we can access the status register */
err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
@ -518,6 +529,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
exit_free:
pdata->core_data[attr_no] = NULL;
kfree(tdata);
ida_free:
if (!pkg_flag)
ida_free(&pdata->ida, index);
return err;
}
@ -537,6 +551,9 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
kfree(pdata->core_data[indx]);
pdata->core_data[indx] = NULL;
if (indx >= BASE_SYSFS_ATTR_NO)
ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
}
static int coretemp_probe(struct platform_device *pdev)
@ -550,6 +567,7 @@ static int coretemp_probe(struct platform_device *pdev)
return -ENOMEM;
pdata->pkg_id = pdev->id;
ida_init(&pdata->ida);
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@ -566,6 +584,7 @@ static int coretemp_remove(struct platform_device *pdev)
if (pdata->core_data[i])
coretemp_remove_core(pdata, i);
ida_destroy(&pdata->ida);
return 0;
}
@ -660,7 +679,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
struct platform_device *pdev = coretemp_get_pdev(cpu);
struct platform_data *pd;
struct temp_data *tdata;
int indx, target;
int i, indx = -1, target;
/*
* Don't execute this on suspend as the device remove locks
@ -673,12 +692,19 @@ static int coretemp_cpu_offline(unsigned int cpu)
if (!pdev)
return 0;
/* The core id is too big, just return */
indx = TO_ATTR_NO(cpu);
if (indx > MAX_CORE_DATA - 1)
pd = platform_get_drvdata(pdev);
for (i = 0; i < NUM_REAL_CORES; i++) {
if (pd->cpu_map[i] == topology_core_id(cpu)) {
indx = i + BASE_SYSFS_ATTR_NO;
break;
}
}
/* Too many cores and this core is not populated, just return */
if (indx < 0)
return 0;
pd = platform_get_drvdata(pdev);
tdata = pd->core_data[indx];
cpumask_clear_cpu(cpu, &pd->cpumask);

@ -866,7 +866,7 @@ static int tsl2583_probe(struct i2c_client *clientp,
TSL2583_POWER_OFF_DELAY_MS);
pm_runtime_use_autosuspend(&clientp->dev);
ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&clientp->dev, "%s: iio registration failed\n",
__func__);

@ -2800,6 +2800,7 @@ static int __init si_domain_init(int hw)
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
si_domain = NULL;
return -EFAULT;
}
@ -3502,6 +3503,10 @@ free_iommu:
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
if (si_domain) {
domain_exit(si_domain);
si_domain = NULL;
}
kfree(g_iommus);

@ -155,6 +155,8 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
if (!fmt)
return NULL;
}
pixmp->width = clamp(pixmp->width, frame_width_min(inst),

@ -297,6 +297,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a
return vivid_vid_out_g_fbuf(file, fh, a);
}
/*
* Only support the framebuffer of one of the vivid instances.
* Anything else is rejected.
*/
bool vivid_validate_fb(const struct v4l2_framebuffer *a)
{
struct vivid_dev *dev;
int i;
for (i = 0; i < n_devs; i++) {
dev = vivid_devs[i];
if (!dev || !dev->video_pbase)
continue;
if ((unsigned long)a->base == dev->video_pbase &&
a->fmt.width <= dev->display_width &&
a->fmt.height <= dev->display_height &&
a->fmt.bytesperline <= dev->display_byte_stride)
return true;
}
return false;
}
static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);

@ -551,4 +551,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
return dev->output_type[dev->output] == HDMI;
}
bool vivid_validate_fb(const struct v4l2_framebuffer *a);
#endif

@ -441,6 +441,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
dev->crop_bounds_cap = dev->src_rect;
if (dev->bitmap_cap &&
(dev->compose_cap.width != dev->crop_cap.width ||
dev->compose_cap.height != dev->crop_cap.height)) {
vfree(dev->bitmap_cap);
dev->bitmap_cap = NULL;
}
dev->compose_cap = dev->crop_cap;
if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
dev->compose_cap.height /= 2;
@ -871,6 +877,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_cap;
struct v4l2_rect *compose = &dev->compose_cap;
unsigned orig_compose_w = compose->width;
unsigned orig_compose_h = compose->height;
unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
int ret;
@ -987,17 +995,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
s->r.height /= factor;
}
v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
if (dev->bitmap_cap && (compose->width != s->r.width ||
compose->height != s->r.height)) {
vfree(dev->bitmap_cap);
dev->bitmap_cap = NULL;
}
*compose = s->r;
break;
default:
return -EINVAL;
}
if (dev->bitmap_cap && (compose->width != orig_compose_w ||
compose->height != orig_compose_h)) {
vfree(dev->bitmap_cap);
dev->bitmap_cap = NULL;
}
tpg_s_crop_compose(&dev->tpg, crop, compose);
return 0;
}
@ -1240,7 +1248,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
return -EINVAL;
if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
return -EINVAL;
if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height)
return -EINVAL;
/*
* Only support the framebuffer of one of the vivid instances.
* Anything else is rejected.
*/
if (!vivid_validate_fb(a))
return -EINVAL;
dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);

@ -161,6 +161,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
(bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
(!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
return false;
/* sanity checks for the blanking timings */
if (!bt->interlaced &&
(bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
return false;
if (bt->hfrontporch > 2 * bt->width ||
bt->hsync > 1024 || bt->hbackporch > 1024)
return false;
if (bt->vfrontporch > 4096 ||
bt->vsync > 128 || bt->vbackporch > 4096)
return false;
if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
bt->il_vsync > 128 || bt->il_vbackporch > 4096))
return false;
return fnc == NULL || fnc(t, fnc_handle);
}
EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);

@ -445,19 +445,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
int ret = 0;
unsigned int i;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_querybuf(vq, buf);
/* Adjust MMAP memory offsets for the CAPTURE queue */
if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
unsigned int i;
for (i = 0; i < buf->length; ++i)
buf->m.planes[i].m.mem_offset
+= DST_QUEUE_OFF_BASE;
@ -465,8 +460,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
buf->m.offset += DST_QUEUE_OFF_BASE;
}
}
}
return ret;
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_querybuf(vq, buf);
if (ret)
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
@ -478,10 +488,15 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_qbuf(vq, buf);
if (!ret)
v4l2_m2m_try_schedule(m2m_ctx);
if (ret)
return ret;
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
v4l2_m2m_try_schedule(m2m_ctx);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
@ -489,9 +504,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
if (ret)
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
@ -503,10 +526,15 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_prepare_buf(vq, buf);
if (!ret)
v4l2_m2m_try_schedule(m2m_ctx);
if (ret)
return ret;
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
v4l2_m2m_try_schedule(m2m_ctx);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

@ -264,7 +264,8 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
sdio_free_func_cis(func);
if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
sdio_free_func_cis(func);
kfree(func->info);
kfree(func->tmpbuf);

@ -336,14 +336,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
&mscan_clksrc);
if (!priv->can.clock.freq) {
dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
goto exit_free_mscan;
goto exit_put_clock;
}
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
goto exit_free_mscan;
goto exit_put_clock;
}
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@ -351,7 +351,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
return 0;
exit_free_mscan:
exit_put_clock:
if (data->put_clock)
data->put_clock(ofdev);
free_candev(dev);
exit_dispose_irq:
irq_dispose_mapping(irq);

@ -1079,7 +1079,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
struct net_device *ndev;
struct rcar_canfd_channel *priv;
u32 sts, gerfl;
u32 sts, cc, gerfl;
u32 ch, ridx;
/* Global error interrupts still indicate a condition specific
@ -1097,7 +1097,9 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
/* Handle Rx interrupts */
sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
if (likely(sts & RCANFD_RFSTS_RFIF)) {
cc = rcar_canfd_read(priv->base, RCANFD_RFCC(ridx));
if (likely(sts & RCANFD_RFSTS_RFIF &&
cc & RCANFD_RFCC_RFIE)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,

@ -1845,7 +1845,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
init_completion(&priv->start_comp);
reinit_completion(&priv->start_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
priv->channel);
@ -1863,7 +1863,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
init_completion(&priv->stop_comp);
reinit_completion(&priv->stop_comp);
/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
* see comment in kvaser_usb_hydra_update_state()

@ -1324,7 +1324,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
init_completion(&priv->start_comp);
reinit_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
priv->channel);
@ -1342,7 +1342,7 @@ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
init_completion(&priv->stop_comp);
reinit_completion(&priv->stop_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);

@ -238,6 +238,7 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78
#define XGBE_SFP_BASE_CU_CABLE_LEN 18
@ -283,6 +284,8 @@ struct xgbe_sfp_eeprom {
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
#define XGBE_MOLEX_VENDOR "Molex Inc. "
struct xgbe_sfp_ascii {
union {
char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@ -833,7 +836,11 @@ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
break;
case XGBE_SFP_SPEED_10000:
min = XGBE_SFP_BASE_BR_10GBE_MIN;
max = XGBE_SFP_BASE_BR_10GBE_MAX;
if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
else
max = XGBE_SFP_BASE_BR_10GBE_MAX;
break;
default:
return false;
@ -1138,7 +1145,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
}
/* Determine the type of SFP */
if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@ -1154,9 +1164,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:

@ -423,8 +423,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
if (ret)
if (ret) {
put_device(&hdev->cls_dev);
return ret;
}
__module_get(THIS_MODULE);

@ -2916,6 +2916,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
ret = of_device_register(&port->ofdev);
if (ret) {
pr_err("failed to register device. ret=%d\n", ret);
put_device(&port->ofdev.dev);
goto out;
}

@ -2699,10 +2699,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
if (cmd->flow_type == TCP_V4_FLOW ||
cmd->flow_type == UDP_V4_FLOW) {
if (i_set & I40E_L3_SRC_MASK)
cmd->data |= RXH_IP_SRC;
if (i_set & I40E_L3_DST_MASK)
cmd->data |= RXH_IP_DST;
if (hw->mac.type == I40E_MAC_X722) {
if (i_set & I40E_X722_L3_SRC_MASK)
cmd->data |= RXH_IP_SRC;
if (i_set & I40E_X722_L3_DST_MASK)
cmd->data |= RXH_IP_DST;
} else {
if (i_set & I40E_L3_SRC_MASK)
cmd->data |= RXH_IP_SRC;
if (i_set & I40E_L3_DST_MASK)
cmd->data |= RXH_IP_DST;
}
} else if (cmd->flow_type == TCP_V6_FLOW ||
cmd->flow_type == UDP_V6_FLOW) {
if (i_set & I40E_L3_V6_SRC_MASK)
@ -3009,12 +3016,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
* @hw: hw structure
* @nfc: pointer to user request
* @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
struct ethtool_rxnfc *nfc,
u64 i_setc)
{
u64 i_set = i_setc;
u64 src_l3 = 0, dst_l3 = 0;
@ -3033,8 +3043,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
dst_l3 = I40E_L3_V6_DST_MASK;
} else if (nfc->flow_type == TCP_V4_FLOW ||
nfc->flow_type == UDP_V4_FLOW) {
src_l3 = I40E_L3_SRC_MASK;
dst_l3 = I40E_L3_DST_MASK;
if (hw->mac.type == I40E_MAC_X722) {
src_l3 = I40E_X722_L3_SRC_MASK;
dst_l3 = I40E_X722_L3_DST_MASK;
} else {
src_l3 = I40E_L3_SRC_MASK;
dst_l3 = I40E_L3_DST_MASK;
}
} else {
/* Any other flow type are not supported here */
return i_set;
@ -3052,6 +3067,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
return i_set;
}
#define FLOW_PCTYPES_SIZE 64
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
@ -3064,9 +3080,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
u8 flow_pctype = 0;
DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
u64 i_set, i_setc;
bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
@ -3082,36 +3100,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
hena |=
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
hena |=
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
hena |=
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
hena |=
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
flow_pctypes);
}
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
hena |=
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
flow_pctypes);
}
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
@ -3144,17 +3161,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
if (flow_pctype) {
i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
flow_pctype)) |
((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
flow_pctype)) << 32);
i_set = i40e_get_rss_hash_bits(nfc, i_setc);
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
(u32)i_set);
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
(u32)(i_set >> 32));
hena |= BIT_ULL(flow_pctype);
if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
u8 flow_id;
for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
(u32)i_set);
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
(u32)(i_set >> 32));
hena |= BIT_ULL(flow_id);
}
}
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);

@ -1462,6 +1462,10 @@ struct i40e_lldp_variables {
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
/* INPUT SET MASK for RSS, flow director, and flexible payload */
#define I40E_X722_L3_SRC_SHIFT 49
#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
#define I40E_X722_L3_DST_SHIFT 41
#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
#define I40E_L3_SRC_SHIFT 47
#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
#define I40E_L3_V6_SRC_SHIFT 43

@ -1227,10 +1227,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
/* If the VFs have been disabled, this means something else is
* resetting the VF, so we shouldn't continue.
*/
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
/* Bail out if VFs are disabled. */
if (test_bit(__I40E_VF_DISABLE, pf->state))
return true;
/* If VF is being reset already we don't need to continue. */
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
return true;
i40e_trigger_vf_reset(vf, flr);
@ -1267,7 +1269,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, pf->state);
clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
}
@ -1300,8 +1302,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
for (v = 0; v < pf->num_alloc_vfs; v++)
i40e_trigger_vf_reset(&pf->vf[v], flr);
for (v = 0; v < pf->num_alloc_vfs; v++) {
vf = &pf->vf[v];
/* If VF is being reset no need to trigger reset again */
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
i40e_trigger_vf_reset(&pf->vf[v], flr);
}
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@ -1317,9 +1323,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
break;
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
break;
}
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@ -1347,6 +1355,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@ -1358,6 +1370,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@ -1367,8 +1383,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
for (v = 0; v < pf->num_alloc_vfs; v++)
for (v = 0; v < pf->num_alloc_vfs; v++) {
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
i40e_cleanup_reset_vf(&pf->vf[v]);
}
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, pf->state);

@ -37,6 +37,7 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
I40E_VF_STATE_RESETTING
};
/* VF capabilities */

@ -480,7 +480,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
dev_kfree_skb_any(skb);
netdev_err(dev, "tx ring full\n");
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;

@ -117,7 +117,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
struct xfrm_replay_state_esn *replay_esn;
u32 seq_bottom;
u8 overlap;
u32 *esn;
if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
sa_entry->esn_state.trigger = 0;
@ -130,11 +129,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
htonl(seq_bottom));
esn = &sa_entry->esn_state.esn;
sa_entry->esn_state.trigger = 1;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
++(*esn);
sa_entry->esn_state.overlap = 0;
return true;
} else if (unlikely(!overlap &&

@ -6936,7 +6936,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
result = pci_enable_device(pdev);
result = pcim_enable_device(pdev);
if (result)
return result;

@ -1549,11 +1549,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
ret = PTR_ERR(priv->phydev);
dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
priv->phydev = NULL;
mdiobus_unregister(bus);
return -ENODEV;
}
ret = phy_device_register(priv->phydev);
if (ret) {
phy_device_free(priv->phydev);
mdiobus_unregister(bus);
dev_err(priv->dev,
"phy_device_register err(%d)\n", ret);

@ -950,6 +950,9 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
/* completion variable to confirm vf association */
struct completion vf_add;
};
/* Per channel data */

@ -1227,6 +1227,10 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
if (net_device_ctx->vf_alloc)
complete(&net_device_ctx->vf_add);
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");

@ -2121,6 +2121,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
struct net_device *ndev;
struct pci_dev *pdev;
u32 serial;
@ -2147,6 +2148,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
return hv_get_drvdata(ndev_ctx->device_ctx);
}
/* Fallback path to check synthetic vf with
* help of mac addr
*/
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
netdev_notice(vf_netdev,
"falling back to mac addr based matching\n");
return ndev;
}
}
netdev_notice(vf_netdev,
"no netdev found for vf serial:%u\n", serial);
return NULL;
@ -2216,6 +2229,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (!netvsc_dev)
return NOTIFY_DONE;
if (vf_is_up && !net_device_ctx->vf_alloc) {
netdev_info(ndev, "Waiting for the VF association from host\n");
wait_for_completion(&net_device_ctx->vf_add);
}
netvsc_switch_datapath(ndev, vf_is_up);
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
@ -2237,6 +2255,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@ -2274,6 +2293,7 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

@ -777,6 +777,13 @@ static const struct usb_device_id products[] = {
},
#endif
/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,

@ -5327,6 +5327,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},

@ -388,6 +388,15 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
/* NVIDIA Jetson devices in Force Recovery mode */
{ USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME },
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },

@ -981,8 +981,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
}
/* always enable Interrupt on Missed ISOC */
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
if (!no_interrupt && !chain)
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
break;
case USB_ENDPOINT_XFER_BULK:
@ -2363,6 +2363,10 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
return 1;
if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
(trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;

@ -151,6 +151,7 @@ static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit)
bdc->delayed_status = false;
bdc->reinit = reinit;
bdc->test_mode = false;
usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
}
/* TNotify wkaeup timer */

@ -906,15 +906,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
/* Endpoints on the TT/root port lists should have been removed
* when usb_disable_device() was called for the device.
* We can't drop them anyway, because the udev might have gone
* away by this point, and we can't tell what speed it was.
/*
* Endpoints are normally deleted from the bandwidth list when
* endpoints are dropped, before device is freed.
* If host is dying or being removed then endpoints aren't
* dropped cleanly, so delete the endpoint from list here.
* Only applicable for hosts with software bandwidth checking.
*/
if (!list_empty(&dev->eps[i].bw_endpoint_list))
xhci_warn(xhci, "Slot %u endpoint %u "
"not removed from BW list!\n",
slot_id, i);
if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
list_del_init(&dev->eps[i].bw_endpoint_list);
xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
slot_id, i);
}
}
/* If this is a hub, free the TT(s) from the TT list */
xhci_free_tt_info(xhci, dev, slot_id);

@ -242,8 +242,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
/*
* try to tame the ASMedia 1042 controller which reports 0.96
* but appears to behave more like 1.0
*/
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;

@ -100,7 +100,6 @@ struct ufx_data {
struct kref kref;
int fb_count;
bool virtualized; /* true when physical usb device not present */
struct delayed_work free_framebuffer_work;
atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
u8 *edid; /* null until we read edid from hw or get from sysfs */
@ -1119,15 +1118,24 @@ static void ufx_free(struct kref *kref)
{
struct ufx_data *dev = container_of(kref, struct ufx_data, kref);
/* this function will wait for all in-flight urbs to complete */
if (dev->urbs.count > 0)
ufx_free_urb_list(dev);
pr_debug("freeing ufx_data %p", dev);
kfree(dev);
}
static void ufx_ops_destory(struct fb_info *info)
{
struct ufx_data *dev = info->par;
int node = info->node;
/* Assume info structure is freed after this point */
framebuffer_release(info);
pr_debug("fb_info for /dev/fb%d has been freed", node);
/* release reference taken by kref_init in probe() */
kref_put(&dev->kref, ufx_free);
}
static void ufx_release_urb_work(struct work_struct *work)
{
struct urb_node *unode = container_of(work, struct urb_node,
@ -1136,14 +1144,9 @@ static void ufx_release_urb_work(struct work_struct *work)
up(&unode->dev->urbs.limit_sem);
}
static void ufx_free_framebuffer_work(struct work_struct *work)
static void ufx_free_framebuffer(struct ufx_data *dev)
{
struct ufx_data *dev = container_of(work, struct ufx_data,
free_framebuffer_work.work);
struct fb_info *info = dev->info;
int node = info->node;
unregister_framebuffer(info);
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
@ -1155,11 +1158,6 @@ static void ufx_free_framebuffer_work(struct work_struct *work)
dev->info = NULL;
/* Assume info structure is freed after this point */
framebuffer_release(info);
pr_debug("fb_info for /dev/fb%d has been freed", node);
/* ref taken in probe() as part of registering framebfufer */
kref_put(&dev->kref, ufx_free);
}
@ -1171,11 +1169,13 @@ static int ufx_ops_release(struct fb_info *info, int user)
{
struct ufx_data *dev = info->par;
mutex_lock(&disconnect_mutex);
dev->fb_count--;
/* We can't free fb_info here - fbmem will touch it when we return */
if (dev->virtualized && (dev->fb_count == 0))
schedule_delayed_work(&dev->free_framebuffer_work, HZ);
ufx_free_framebuffer(dev);
if ((dev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
@ -1189,6 +1189,8 @@ static int ufx_ops_release(struct fb_info *info, int user)
kref_put(&dev->kref, ufx_free);
mutex_unlock(&disconnect_mutex);
return 0;
}
@ -1295,6 +1297,7 @@ static struct fb_ops ufx_ops = {
.fb_blank = ufx_ops_blank,
.fb_check_var = ufx_ops_check_var,
.fb_set_par = ufx_ops_set_par,
.fb_destroy = ufx_ops_destory,
};
/* Assumes &info->lock held by caller
@ -1678,9 +1681,6 @@ static int ufx_usb_probe(struct usb_interface *interface,
goto destroy_modedb;
}
INIT_DELAYED_WORK(&dev->free_framebuffer_work,
ufx_free_framebuffer_work);
retval = ufx_reg_read(dev, 0x3000, &id_rev);
check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
@ -1753,10 +1753,12 @@ e_nomem:
static void ufx_usb_disconnect(struct usb_interface *interface)
{
struct ufx_data *dev;
struct fb_info *info;
mutex_lock(&disconnect_mutex);
dev = usb_get_intfdata(interface);
info = dev->info;
pr_debug("USB disconnect starting\n");
@ -1770,12 +1772,15 @@ static void ufx_usb_disconnect(struct usb_interface *interface)
/* if clients still have us open, will be freed on last close */
if (dev->fb_count == 0)
schedule_delayed_work(&dev->free_framebuffer_work, 0);
ufx_free_framebuffer(dev);
/* release reference taken by kref_init in probe() */
kref_put(&dev->kref, ufx_free);
/* this function will wait for all in-flight urbs to complete */
if (dev->urbs.count > 0)
ufx_free_urb_list(dev);
/* consider ufx_data freed */
pr_debug("freeing ufx_data %p", dev);
unregister_framebuffer(info);
mutex_unlock(&disconnect_mutex);
}

@ -387,8 +387,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
if (!use_ptemod)
alloced++;
alloced++;
} else if (!err)
err = -EINVAL;
@ -397,8 +396,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
if (use_ptemod) {
if (map->kmap_ops[i].status == GNTST_okay) {
if (map->map_ops[i].status == GNTST_okay)
alloced++;
alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
} else if (!err)
err = -EINVAL;
@ -414,20 +412,42 @@ static void __unmap_grant_pages_done(int result,
unsigned int i;
struct gntdev_grant_map *map = data->data;
unsigned int offset = data->unmap_ops - map->unmap_ops;
int successful_unmaps = 0;
int live_grants;
for (i = 0; i < data->count; i++) {
if (map->unmap_ops[offset + i].status == GNTST_okay &&
map->unmap_ops[offset + i].handle != -1)
successful_unmaps++;
WARN_ON(map->unmap_ops[offset+i].status &&
map->unmap_ops[offset+i].handle != -1);
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = -1;
if (use_ptemod) {
if (map->kunmap_ops[offset + i].status == GNTST_okay &&
map->kunmap_ops[offset + i].handle != -1)
successful_unmaps++;
WARN_ON(map->kunmap_ops[offset+i].status &&
map->kunmap_ops[offset+i].handle != -1);
pr_debug("kunmap handle=%u st=%d\n",
map->kunmap_ops[offset+i].handle,
map->kunmap_ops[offset+i].status);
map->kunmap_ops[offset+i].handle = -1;
}
}
/*
* Decrease the live-grant counter. This must happen after the loop to
* prevent premature reuse of the grants by gnttab_mmap().
*/
atomic_sub(data->count, &map->live_grants);
live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
if (WARN_ON(live_grants < 0))
pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
__func__, live_grants, successful_unmaps);
/* Release reference taken by __unmap_grant_pages */
gntdev_put_map(NULL, map);


@@ -136,6 +136,7 @@ struct share_check {
u64 root_objectid;
u64 inum;
int share_count;
bool have_delayed_delete_refs;
};
static inline int extent_is_shared(struct share_check *sc)
@@ -760,16 +761,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
struct share_check *sc)
{
struct btrfs_delayed_ref_node *node;
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
struct btrfs_key key;
struct btrfs_key tmp_op_key;
struct rb_node *n;
int count;
int ret = 0;
if (extent_op && extent_op->update_key)
btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
spin_lock(&head->lock);
for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
node = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -796,10 +792,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
case BTRFS_TREE_BLOCK_REF_KEY: {
/* NORMAL INDIRECT METADATA backref */
struct btrfs_delayed_tree_ref *ref;
struct btrfs_key *key_ptr = NULL;
if (head->extent_op && head->extent_op->update_key) {
btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
key_ptr = &key;
}
ref = btrfs_delayed_node_to_tree_ref(node);
ret = add_indirect_ref(fs_info, preftrees, ref->root,
&tmp_op_key, ref->level + 1,
key_ptr, ref->level + 1,
node->bytenr, count, sc,
GFP_ATOMIC);
break;
@@ -825,13 +827,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
key.offset = ref->offset;
/*
* Found a inum that doesn't match our known inum, we
* know it's shared.
* If we have a share check context and a reference for
* another inode, we can't exit immediately. This is
* because even if this is a BTRFS_ADD_DELAYED_REF
* reference we may find next a BTRFS_DROP_DELAYED_REF
* which cancels out this ADD reference.
*
* If this is a DROP reference and there was no previous
* ADD reference, then we need to signal that when we
* process references from the extent tree (through
* add_inline_refs() and add_keyed_refs()), we should
* not exit early if we find a reference for another
* inode, because one of the delayed DROP references
* may cancel that reference in the extent tree.
*/
if (sc && sc->inum && ref->objectid != sc->inum) {
ret = BACKREF_FOUND_SHARED;
goto out;
}
if (sc && count < 0)
sc->have_delayed_delete_refs = true;
ret = add_indirect_ref(fs_info, preftrees, ref->root,
&key, 0, node->bytenr, count, sc,
@@ -861,7 +872,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
}
if (!ret)
ret = extent_is_shared(sc);
out:
spin_unlock(&head->lock);
return ret;
}
@@ -965,7 +976,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
if (sc && sc->inum && key.objectid != sc->inum) {
if (sc && sc->inum && key.objectid != sc->inum &&
!sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
@@ -975,6 +987,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
ret = add_indirect_ref(fs_info, preftrees, root,
&key, 0, bytenr, count,
sc, GFP_NOFS);
break;
}
default:
@@ -1064,7 +1077,8 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
if (sc && sc->inum && key.objectid != sc->inum) {
if (sc && sc->inum && key.objectid != sc->inum &&
!sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
@@ -1490,6 +1504,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
.root_objectid = root->objectid,
.inum = inum,
.share_count = 0,
.have_delayed_delete_refs = false,
};
tmp = ulist_alloc(GFP_NOFS);
@@ -1528,6 +1543,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
break;
bytenr = node->val;
shared.share_count = 0;
shared.have_delayed_delete_refs = false;
cond_resched();
}


@@ -1513,8 +1513,11 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
mutex_lock(&kernfs_mutex);
kn = kernfs_find_ns(parent, name, ns);
if (kn)
if (kn) {
kernfs_get(kn);
__kernfs_remove(kn);
kernfs_put(kn);
}
mutex_unlock(&kernfs_mutex);


@@ -245,6 +245,7 @@ static int ocfs2_mknod(struct inode *dir,
handle_t *handle = NULL;
struct ocfs2_super *osb;
struct ocfs2_dinode *dirfe;
struct ocfs2_dinode *fe = NULL;
struct buffer_head *new_fe_bh = NULL;
struct inode *inode = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
@@ -395,6 +396,7 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
if (S_ISDIR(mode)) {
status = ocfs2_fill_new_dir(osb, handle, dir, inode,
new_fe_bh, data_ac, meta_ac);
@@ -460,8 +462,11 @@ static int ocfs2_mknod(struct inode *dir,
leave:
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
if (handle)
if (handle) {
if (status < 0 && fe)
ocfs2_set_links_count(fe, 0);
ocfs2_commit_trans(osb, handle);
}
ocfs2_inode_unlock(dir, 1);
if (did_block_signals)
@@ -639,18 +644,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
return status;
}
status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
parent_fe_bh, handle, inode_ac,
fe_blkno, suballoc_loc, suballoc_bit);
if (status < 0) {
u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit);
int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
inode_ac->ac_bh, suballoc_bit, bg_blkno, 1);
if (tmp)
mlog_errno(tmp);
}
return status;
}
static int ocfs2_mkdir(struct inode *dir,
@@ -2031,8 +2027,11 @@ bail:
ocfs2_clusters_to_bytes(osb->sb, 1));
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
if (handle)
if (handle) {
if (status < 0 && fe)
ocfs2_set_links_count(fe, 0);
ocfs2_commit_trans(osb, handle);
}
ocfs2_inode_unlock(dir, 1);
if (did_block_signals)


@@ -905,7 +905,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
last_vma_end = vma->vm_end;
}
show_vma_header_prefix(m, priv->mm->mmap->vm_start,
show_vma_header_prefix(m, priv->mm->mmap ? priv->mm->mmap->vm_start : 0,
last_vma_end, 0, 0, 0, 0);
seq_pad(m, ' ');
seq_puts(m, "[rollup]\n");


@@ -1428,7 +1428,8 @@ struct v4l2_bt_timings {
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
(bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
((bt)->interlaced ? \
((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))


@@ -626,7 +626,7 @@ static void power_down(void)
int error;
if (hibernation_mode == HIBERNATION_SUSPEND) {
error = suspend_devices_and_enter(PM_SUSPEND_MEM);
error = suspend_devices_and_enter(mem_sleep_current);
if (error) {
hibernation_mode = hibernation_ops ?
HIBERNATION_PLATFORM :


@@ -2116,11 +2116,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
if (!page)
goto out_uncharge_cgroup;
spin_lock(&hugetlb_lock);
if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
SetPagePrivate(page);
h->resv_huge_pages--;
}
spin_lock(&hugetlb_lock);
list_move(&page->lru, &h->hugepage_activelist);
/* Fall through */
}


@@ -222,11 +222,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
if (!page)
return -ENOMEM;
for (p = page, len = 0; len < nbytes; p++, len++) {
for (p = page, len = 0; len < nbytes; p++) {
if (get_user(*p, buff++)) {
free_page((unsigned long)page);
return -EFAULT;
}
len += 1;
if (*p == '\0' || *p == '\n')
break;
}


@@ -112,6 +112,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data)
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
struct net_generic *ng;
int err = -ENOMEM;
void *data = NULL;
@@ -130,7 +131,13 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
if (!err)
return 0;
if (ops->id && ops->size) {
cleanup:
ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&pernet_ops_rwsem));
ng->ptr[*ops->id] = NULL;
}
kfree(data);
out:


@@ -518,8 +518,10 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
if (err < 0)
goto out;
if (addr->family != AF_IEEE802154)
if (addr->family != AF_IEEE802154) {
err = -EINVAL;
goto out;
}
ieee802154_addr_from_sa(&haddr, &addr->addr);
dev = ieee802154_get_dev(sock_net(sk), &haddr);


@@ -2035,7 +2035,8 @@ void tcp_enter_loss(struct sock *sk)
*/
static bool tcp_check_sack_reneging(struct sock *sk, int flag)
{
if (flag & FLAG_SACK_RENEGING) {
if (flag & FLAG_SACK_RENEGING &&
flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));


@@ -164,7 +164,8 @@ static void kcm_rcv_ready(struct kcm_sock *kcm)
/* Buffer limit is okay now, add to ready list */
list_add_tail(&kcm->wait_rx_list,
&kcm->mux->kcm_rx_waiters);
kcm->rx_wait = true;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, true);
}
static void kcm_rfree(struct sk_buff *skb)
@@ -180,7 +181,7 @@ static void kcm_rfree(struct sk_buff *skb)
/* For reading rx_wait and rx_psock without holding lock */
smp_mb__after_atomic();
if (!kcm->rx_wait && !kcm->rx_psock &&
if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
spin_lock_bh(&mux->rx_lock);
kcm_rcv_ready(kcm);
@@ -239,7 +240,8 @@ try_again:
if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
/* Should mean socket buffer full */
list_del(&kcm->wait_rx_list);
kcm->rx_wait = false;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
/* Commit rx_wait to read in kcm_free */
smp_wmb();
@@ -282,10 +284,12 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
kcm = list_first_entry(&mux->kcm_rx_waiters,
struct kcm_sock, wait_rx_list);
list_del(&kcm->wait_rx_list);
kcm->rx_wait = false;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
psock->rx_kcm = kcm;
kcm->rx_psock = psock;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_psock, psock);
spin_unlock_bh(&mux->rx_lock);
@@ -312,7 +316,8 @@ static void unreserve_rx_kcm(struct kcm_psock *psock,
spin_lock_bh(&mux->rx_lock);
psock->rx_kcm = NULL;
kcm->rx_psock = NULL;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_psock, NULL);
/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
* kcm_rfree
@@ -1244,7 +1249,8 @@ static void kcm_recv_disable(struct kcm_sock *kcm)
if (!kcm->rx_psock) {
if (kcm->rx_wait) {
list_del(&kcm->wait_rx_list);
kcm->rx_wait = false;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
}
requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
@@ -1797,7 +1803,8 @@ static void kcm_done(struct kcm_sock *kcm)
if (kcm->rx_wait) {
list_del(&kcm->wait_rx_list);
kcm->rx_wait = false;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
}
/* Move any pending receive messages to other kcm sockets */
requeue_rx_msgs(mux, &sk->sk_receive_queue);


@@ -140,7 +140,7 @@ static int
ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
int hlen;
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
struct ieee802154_mac_cb *cb = mac_cb(skb);
skb_reset_mac_header(skb);
@@ -302,8 +302,9 @@ void
ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
mac_cb(skb)->lqi = lqi;
cb->lqi = lqi;
skb->pkt_type = IEEE802154_RX_MSG;
skb_queue_tail(&local->skb_queue, skb);
tasklet_schedule(&local->tasklet);


@@ -1551,7 +1551,8 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
if (IS_ERR(dp))
return;
WARN(dp->user_features, "Dropping previously announced user features\n");
pr_warn("%s: Dropping previously announced user features\n",
ovs_dp_name(dp));
dp->user_features = 0;
}


@@ -2149,8 +2149,12 @@ retry:
static void cake_reset(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 c;
if (!q->tins)
return;
for (c = 0; c < CAKE_MAX_TINS; c++)
cake_clear_tin(sch, c);
}


@@ -146,8 +146,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
{
struct net *net = d->net;
struct tipc_net *tn = tipc_net(net);
bool trial = time_before(jiffies, tn->addr_trial_end);
u32 self = tipc_own_addr(net);
bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
if (mtyp == DSC_TRIAL_FAIL_MSG) {
if (!trial)


@@ -457,12 +457,19 @@ static void tipc_conn_data_ready(struct sock *sk)
static void tipc_topsrv_accept(struct work_struct *work)
{
struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
struct socket *lsock = srv->listener;
struct socket *newsock;
struct socket *newsock, *lsock;
struct tipc_conn *con;
struct sock *newsk;
int ret;
spin_lock_bh(&srv->idr_lock);
if (!srv->listener) {
spin_unlock_bh(&srv->idr_lock);
return;
}
lsock = srv->listener;
spin_unlock_bh(&srv->idr_lock);
while (1) {
ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
if (ret < 0)
@@ -496,7 +503,7 @@ static void tipc_topsrv_listener_data_ready(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
srv = sk->sk_user_data;
if (srv->listener)
if (srv)
queue_work(srv->rcv_wq, &srv->awork);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -575,7 +582,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
sub.seq.upper = upper;
sub.timeout = TIPC_WAIT_FOREVER;
sub.filter = filter;
*(u32 *)&sub.usr_handle = port;
*(u64 *)&sub.usr_handle = (u64)port;
con = tipc_conn_alloc(tipc_topsrv(net));
if (IS_ERR(con))
@@ -706,8 +713,9 @@ static void tipc_topsrv_stop(struct net *net)
__module_get(lsock->sk->sk_prot_creator->owner);
srv->listener = NULL;
spin_unlock_bh(&srv->idr_lock);
sock_release(lsock);
tipc_topsrv_work_stop(srv);
sock_release(lsock);
idr_destroy(&srv->conn_idr);
kfree(srv);
}


@@ -148,6 +148,7 @@ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index,
return rc;
}
/* Returns 1 if added, 0 for otherwise; don't return a negative value! */
/* FIXME: look at device node refcounting */
static int i2sbus_add_dev(struct macio_dev *macio,
struct i2sbus_control *control,
@@ -213,7 +214,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
* either as the second one in that case is just a modem. */
if (!ok) {
kfree(dev);
return -ENODEV;
return 0;
}
mutex_init(&dev->lock);
@@ -302,6 +303,10 @@ static int i2sbus_add_dev(struct macio_dev *macio,
if (soundbus_add_one(&dev->sound)) {
printk(KERN_DEBUG "i2sbus: device registration error!\n");
if (dev->sound.ofdev.dev.kobj.state_initialized) {
soundbus_dev_put(&dev->sound);
return 0;
}
goto err;
}


@@ -1965,6 +1965,7 @@ static int snd_ac97_dev_register(struct snd_device *device)
snd_ac97_get_short_name(ac97));
if ((err = device_register(&ac97->dev)) < 0) {
ac97_err(ac97, "Can't register ac97 bus\n");
put_device(&ac97->dev);
ac97->dev.bus = NULL;
return err;
}


@@ -153,7 +153,7 @@ struct snd_vortex {
#ifndef CHIP_AU8810
stream_t dma_wt[NR_WT];
wt_voice_t wt_voice[NR_WT]; /* WT register cache. */
char mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */
s8 mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */
#endif
/* Global resources */
@@ -247,8 +247,8 @@ static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v);
static void vortex_connect_default(vortex_t * vortex, int en);
static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch,
int dir, int type, int subdev);
static char vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out,
int restype);
static int vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out,
int restype);
#ifndef CHIP_AU8810
static int vortex_wt_allocroute(vortex_t * vortex, int dma, int nr_ch);
static void vortex_wt_connect(vortex_t * vortex, int en);


@@ -2004,7 +2004,7 @@ static int resnum[VORTEX_RESOURCE_LAST] =
out: Mean checkout if != 0. Else mean Checkin resource.
restype: Indicates type of resource to be checked in or out.
*/
static char
static int
vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
{
int i, qty = resnum[restype], resinuse = 0;


@@ -138,15 +138,10 @@ EXPORT_SYMBOL(snd_emux_register);
*/
int snd_emux_free(struct snd_emux *emu)
{
unsigned long flags;
if (! emu)
return -EINVAL;
spin_lock_irqsave(&emu->voice_lock, flags);
if (emu->timer_active)
del_timer(&emu->tlist);
spin_unlock_irqrestore(&emu->voice_lock, flags);
del_timer_sync(&emu->tlist);
snd_emux_proc_free(emu);
snd_emux_delete_virmidi(emu);


@@ -546,6 +546,10 @@ static int calc_digits(int num)
{
int count = 0;
/* It takes a digit to represent zero */
if (!num)
return 1;
while (num != 0) {
num /= 10;
count++;


@@ -1678,11 +1678,19 @@ struct sym_args {
bool near;
};
static bool kern_sym_name_match(const char *kname, const char *name)
{
size_t n = strlen(name);
return !strcmp(kname, name) ||
(!strncmp(kname, name, n) && kname[n] == '\t');
}
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
/* A function with the same name, and global or the n'th found or any */
return kallsyms__is_function(type) &&
!strcmp(name, args->name) &&
kern_sym_name_match(name, args->name) &&
((args->global && isupper(type)) ||
(args->selected && ++(args->cnt) == args->idx) ||
(!args->global && !args->selected));


@@ -1915,7 +1915,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
memset(entry, 0, esz);
while (len > 0) {
while (true) {
int next_offset;
size_t byte_offset;
@@ -1928,6 +1928,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
return next_offset;
byte_offset = next_offset * esz;
if (byte_offset >= len)
break;
id += next_offset;
gpa += byte_offset;
len -= byte_offset;