This is the 4.19.135 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl8hMIIACgkQONu9yGCS
 aT4+fQ/+LvplGwblkH8vuttKsz17BHXD8/FcL6LIbLKTyaWJMWp/raMWQyQLrZpL
 B58dFVZxenmpljLvupB9a1qifZk00U8M+aoU45Pf6PHAww2sGoU1h5sZexqRqOmN
 FAh3hLMq1qyJ6qDIugZ0sbtDaO7t5GwvT1YKKo6V9hqi0XamTVrppI/EVVDDA0ve
 /WigyxjT51DIPd3bmjJ3Xn920artrW+fydA+jTyMBBME/qFi5s2yN7Rui0ViNz44
 crwGxAN1v3+MboulHFsnCdLAlh9hyI4VNXpvpNhKIoVE9BMHgBmmnWA+0KUIjAeA
 8GfL2TcspjElNnz9T4f957Rj6Ft7qlStYIyJ45rcGRMXkyNs1lw5CDfkJcy8giVD
 7yImkQZ2c8jCgkr/Vor/MfHOPtg1KzpAuNrWZnobTdnaBGxgcC61pnKHxF5Vx40h
 78hOFXqunGNMwNBR4EEjmP4B3zapeHVo4GXBPtwY8M878Uj28z2pL4Vx6MhbvmTf
 1i8xipclcgpV5ZyN+zv8XA55pcw8ahQOuUknEx+3yH0chlf5cIxXhr92g1DrDwoF
 YvNYJQA7qJpgx/k582u6bJYkBdNa+XJaBLjQUhI/Z9UVS33S/CouGHpFyIMpVMx9
 vo3ujFpuUP4ZCeKENjINa7RfQhD7oHQQQrk5RcsFBYJaWgCdi3A=
 =ugxS
 -----END PGP SIGNATURE-----

Merge 4.19.135 into android-4.19-stable

Changes in 4.19.135
	soc: qcom: rpmh: Dirt can only make you dirtier, not cleaner
	gpio: arizona: handle pm_runtime_get_sync failure case
	gpio: arizona: put pm_runtime in case of failure
	pinctrl: amd: fix npins for uart0 in kerncz_groups
	mac80211: allow rx of mesh eapol frames with default rx key
	scsi: scsi_transport_spi: Fix function pointer check
	xtensa: fix __sync_fetch_and_{and,or}_4 declarations
	xtensa: update *pos in cpuinfo_op.next
	drivers/net/wan/lapbether: Fixed the value of hard_header_len
	net: sky2: initialize return of gm_phy_read
	drm/nouveau/i2c/g94-: increase NV_PMGR_DP_AUXCTL_TRANSACTREQ timeout
	drivers/firmware/psci: Fix memory leakage in alloc_init_cpu_groups()
	fuse: fix weird page warning
	irqdomain/treewide: Keep firmware node unconditionally allocated
	SUNRPC reverting d03727b248d0 ("NFSv4 fix CLOSE not waiting for direct IO compeletion")
	spi: spi-fsl-dspi: Exit the ISR with IRQ_NONE when it's not ours
	tipc: clean up skb list lock handling on send path
	IB/umem: fix reference count leak in ib_umem_odp_get()
	uprobes: Change handle_swbp() to send SIGTRAP with si_code=SI_KERNEL, to fix GDB regression
	ALSA: info: Drop WARN_ON() from buffer NULL sanity check
	ASoC: rt5670: Correct RT5670_LDO_SEL_MASK
	btrfs: fix double free on ulist after backref resolution failure
	btrfs: fix mount failure caused by race with umount
	btrfs: fix page leaks after failure to lock page for delalloc
	bnxt_en: Fix race when modifying pause settings.
	fpga: dfl: fix bug in port reset handshake
	hippi: Fix a size used in a 'pci_free_consistent()' in an error handling path
	ax88172a: fix ax88172a_unbind() failures
	net: dp83640: fix SIOCSHWTSTAMP to update the struct with actual configuration
	ieee802154: fix one possible memleak in adf7242_probe
	drm: sun4i: hdmi: Fix inverted HPD result
	net: smc91x: Fix possible memory leak in smc_drv_probe()
	bonding: check error value of register_netdevice() immediately
	mlxsw: destroy workqueue when trap_register in mlxsw_emad_init
	qed: suppress "don't support RoCE & iWARP" flooding on HW init
	ipvs: fix the connection sync failed in some cases
	net: ethernet: ave: Fix error returns in ave_init
	i2c: rcar: always clear ICSAR to avoid side effects
	bonding: check return value of register_netdevice() in bond_newlink()
	serial: exar: Fix GPIO configuration for Sealevel cards based on XR17V35X
	scripts/decode_stacktrace: strip basepath from all paths
	scripts/gdb: fix lx-symbols 'gdb.error' while loading modules
	HID: i2c-hid: add Mediacom FlexBook edge13 to descriptor override
	HID: alps: support devices with report id 2
	HID: steam: fixes race in handling device list.
	HID: apple: Disable Fn-key key-re-mapping on clone keyboards
	dmaengine: tegra210-adma: Fix runtime PM imbalance on error
	Input: add `SW_MACHINE_COVER`
	spi: mediatek: use correct SPI_CFG2_REG MACRO
	regmap: dev_get_regmap_match(): fix string comparison
	hwmon: (aspeed-pwm-tacho) Avoid possible buffer overflow
	dmaengine: ioat setting ioat timeout as module parameter
	Input: synaptics - enable InterTouch for ThinkPad X1E 1st gen
	usb: gadget: udc: gr_udc: fix memleak on error handling path in gr_ep_init()
	hwmon: (adm1275) Make sure we are reading enough data for different chips
	hwmon: (scmi) Fix potential buffer overflow in scmi_hwmon_probe()
	arm64: Use test_tsk_thread_flag() for checking TIF_SINGLESTEP
	x86: math-emu: Fix up 'cmp' insn for clang ias
	RISC-V: Upgrade smp_mb__after_spinlock() to iorw,iorw
	binder: Don't use mmput() from shrinker function.
	usb: xhci-mtk: fix the failure of bandwidth allocation
	usb: xhci: Fix ASM2142/ASM3142 DMA addressing
	Revert "cifs: Fix the target file was deleted when rename failed."
	staging: wlan-ng: properly check endpoint types
	staging: comedi: addi_apci_1032: check INSN_CONFIG_DIGITAL_TRIG shift
	staging: comedi: ni_6527: fix INSN_CONFIG_DIGITAL_TRIG support
	staging: comedi: addi_apci_1500: check INSN_CONFIG_DIGITAL_TRIG shift
	staging: comedi: addi_apci_1564: check INSN_CONFIG_DIGITAL_TRIG shift
	serial: 8250: fix null-ptr-deref in serial8250_start_tx()
	serial: 8250_mtk: Fix high-speed baud rates clamping
	fbdev: Detect integer underflow at "struct fbcon_ops"->clear_margins.
	vt: Reject zero-sized screen buffer size.
	Makefile: Fix GCC_TOOLCHAIN_DIR prefix for Clang cross compilation
	mm/memcg: fix refcount error while moving and swapping
	mm: memcg/slab: synchronize access to kmem_cache dying flag using a spinlock
	mm: memcg/slab: fix memory leak at non-root kmem_cache destroy
	io-mapping: indicate mapping failure
	drm/amdgpu: Fix NULL dereference in dpm sysfs handlers
	drm/amd/powerplay: fix a crash when overclocking Vega M
	parisc: Add atomic64_set_release() define to avoid CPU soft lockups
	x86, vmlinux.lds: Page-align end of ..page_aligned sections
	ASoC: rt5670: Add new gpio1_is_ext_spk_en quirk and enable it on the Lenovo Miix 2 10
	ASoC: qcom: Drop HAS_DMA dependency to fix link failure
	dm integrity: fix integrity recalculation that is improperly skipped
	ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb
	ath9k: Fix regression with Atheros 9271
	Linux 4.19.135

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0bbcde83e7c810352d998f28d3484efa2b9ede8e
Greg Kroah-Hartman 2020-07-29 13:22:30 +02:00
commit bcf9517454
104 changed files with 587 additions and 228 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 134
SUBLEVEL = 135
EXTRAVERSION =
NAME = "People's Front"
@ -489,7 +489,7 @@ ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)


@ -392,14 +392,14 @@ void user_rewind_single_step(struct task_struct *task)
* If single step is active for this thread, then set SPSR.SS
* to 1 to avoid returning to the active-pending state.
*/
if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);
void user_fastforward_single_step(struct task_struct *task)
{
if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
clear_regs_spsr_ss(task_pt_regs(task));
}


@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
#define atomic64_set_release(v, i) atomic64_set((v), (i))
static __inline__ s64
atomic64_read(const atomic64_t *v)
{


@ -69,8 +69,16 @@ do { \
* The AQ/RL pair provides a RCpc critical section, but there's not really any
* way we can take advantage of that here because the ordering is only enforced
* on that one lock. Thus, we're just doing a full fence.
*
* Since we allow writeX to be called from preemptive regions we need at least
* an "o" in the predecessor set to ensure device writes are visible before the
* task is marked as available for scheduling on a new hart. While I don't see
* any concrete reason we need a full IO fence, it seems safer to just upgrade
* this in order to avoid any IO crossing a scheduling boundary. In both
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)
#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
#include <asm-generic/barrier.h>


@ -2323,12 +2323,12 @@ static int mp_irqdomain_create(int ioapic)
ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
(void *)(long)ioapic);
/* Release fw handle if it was allocated above */
if (!cfg->dev)
irq_domain_free_fwnode(fn);
if (!ip->irqdomain)
if (!ip->irqdomain) {
/* Release fw handle if it was allocated above */
if (!cfg->dev)
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
ip->irqdomain->parent = parent;


@ -265,12 +265,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
msi_default_domain =
pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
parent);
irq_domain_free_fwnode(fn);
}
if (!msi_default_domain)
if (!msi_default_domain) {
irq_domain_free_fwnode(fn);
pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
else
} else {
msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
}
}
#ifdef CONFIG_IRQ_REMAP
@ -303,7 +304,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
if (!fn)
return NULL;
d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
irq_domain_free_fwnode(fn);
if (!d)
irq_domain_free_fwnode(fn);
return d;
}
#endif
@ -366,7 +368,8 @@ static struct irq_domain *dmar_get_irq_domain(void)
if (fn) {
dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
x86_vector_domain);
irq_domain_free_fwnode(fn);
if (!dmar_domain)
irq_domain_free_fwnode(fn);
}
out:
mutex_unlock(&dmar_lock);
@ -491,7 +494,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
}
d = msi_create_irq_domain(fn, domain_info, parent);
irq_domain_free_fwnode(fn);
if (!d) {
irq_domain_free_fwnode(fn);
kfree(domain_info);
}
return d;
}


@ -703,7 +703,6 @@ int __init arch_early_irq_init(void)
x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
NULL);
BUG_ON(x86_vector_domain == NULL);
irq_domain_free_fwnode(fn);
irq_set_default_host(x86_vector_domain);
arch_init_msi_domain(x86_vector_domain);


@ -378,6 +378,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
. = ALIGN(PAGE_SIZE);
*(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);


@ -209,7 +209,7 @@ sqrt_stage_2_finish:
#ifdef PARANOID
/* It should be possible to get here only if the arg is ffff....ffff */
cmp $0xffffffff,FPU_fsqrt_arg_1
cmpl $0xffffffff,FPU_fsqrt_arg_1
jnz sqrt_stage_2_error
#endif /* PARANOID */


@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void)
goto out;
uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
irq_domain_free_fwnode(fn);
if (uv_domain)
uv_domain->parent = x86_vector_domain;
else
irq_domain_free_fwnode(fn);
out:
mutex_unlock(&uv_lock);


@ -720,7 +720,8 @@ c_start(struct seq_file *f, loff_t *pos)
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
return NULL;
++*pos;
return c_start(f, pos);
}
static void


@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void)
}
EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
{
BUG();
}
EXPORT_SYMBOL(__sync_fetch_and_and_4);
unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
{
BUG();
}


@ -953,7 +953,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
up_read(&mm->mmap_sem);
mmput(mm);
mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);


@ -1343,7 +1343,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
return (*r)->name == data;
return !strcmp((*r)->name, data);
else
return 1;
}
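
The hunk above makes dev_get_regmap_match() compare the requested regmap name by content rather than by pointer identity. A minimal standalone sketch of the difference (illustrative only, not taken from the kernel tree; the strings are invented):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char stored[] = "syscon";   /* name kept with the registered regmap */
		char wanted[] = "syscon";   /* name the caller asks for */

		/* Old test: pointer equality, false even for identical strings. */
		printf("pointer match: %d\n", stored == wanted);
		/* New test: content comparison, true for identical strings. */
		printf("strcmp match:  %d\n", !strcmp(stored, wanted));
		return 0;
	}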


@ -38,6 +38,18 @@
#include "../dmaengine.h"
int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
"set ioat completion timeout [msec] (default 200 [msec])");
int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
"set ioat idel timeout [msec] (default 2000 [msec])");
#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
static char *chanerr_str[] = {
"DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",


@ -111,8 +111,6 @@ struct ioatdma_chan {
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100)
struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma;


@ -583,6 +583,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
ret = pm_runtime_get_sync(tdc2dev(tdc));
if (ret < 0) {
pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@ -764,8 +765,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
goto rpm_disable;
}
ret = tegra_adma_init(tdma);
if (ret)


@ -164,8 +164,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
GFP_KERNEL);
if (!cpu_groups)
if (!cpu_groups) {
free_cpumask_var(tmp);
return -ENOMEM;
}
cpumask_copy(tmp, cpu_online_mask);
@ -174,6 +176,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
topology_core_cpumask(cpumask_any(tmp));
if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
free_cpumask_var(tmp);
free_cpu_groups(num_groups, &cpu_groups);
return -ENOMEM;
}


@ -79,7 +79,8 @@ static int port_disable(struct platform_device *pdev)
* on this port and minimum soft reset pulse width has elapsed.
* Driver polls port_soft_reset_ack to determine if reset done by HW.
*/
if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
dev_err(&pdev->dev, "timeout, fail to reset device\n");
return -ETIMEDOUT;


@ -69,6 +69,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
pm_runtime_put_autosuspend(chip->parent);
return ret;
}
@ -77,12 +78,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
if (ret < 0) {
dev_err(chip->parent, "Failed to drop cache: %d\n",
ret);
pm_runtime_put_autosuspend(chip->parent);
return ret;
}
ret = regmap_read(arizona->regmap, reg, &val);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_autosuspend(chip->parent);
return ret;
}
pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
@ -111,6 +115,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
pm_runtime_put(chip->parent);
return ret;
}
}


@ -529,8 +529,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
while (isspace(*++tmp_str));
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@ -630,8 +629,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@ -882,8 +880,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret) {
count = -EINVAL;


@ -643,9 +643,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@ -653,8 +650,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
else
else {
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
}
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;


@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
/* transaction request, wait up to 1ms for it to complete */
/* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
timeout = 1000;
timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
udelay(1);


@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
/* transaction request, wait up to 1ms for it to complete */
/* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
timeout = 1000;
timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + base);
udelay(1);


@ -243,7 +243,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
unsigned long reg;
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
if (reg & SUN4I_HDMI_HPD_HIGH) {
if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}


@ -29,6 +29,7 @@
#define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */
#define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */
#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */
#define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */
#define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */
@ -372,6 +373,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
case U1_FEATURE_REPORT_ID:
break;
case U1_ABSOLUTE_REPORT_ID:
case U1_ABSOLUTE_REPORT_ID_SECD:
for (i = 0; i < hdata->max_fingers; i++) {
u8 *contact = &data[i * 5];


@ -57,6 +57,7 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
unsigned int fn_found;
DECLARE_BITMAP(pressed_numlock, KEY_CNT);
};
@ -342,12 +343,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
asc->fn_found = true;
apple_setup_input(hi->input);
return 1;
}
@ -374,6 +378,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
static int apple_input_configured(struct hid_device *hdev,
struct hid_input *hidinput)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
asc->quirks = 0;
}
return 0;
}
static int apple_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@ -588,6 +605,7 @@ static struct hid_driver apple_driver = {
.event = apple_event,
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
.input_configured = apple_input_configured,
};
module_hid_driver(apple_driver);


@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam)
steam_battery_register(steam);
mutex_lock(&steam_devices_lock);
list_add(&steam->list, &steam_devices);
if (list_empty(&steam->list))
list_add(&steam->list, &steam_devices);
mutex_unlock(&steam_devices_lock);
}
@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam)
hid_info(steam->hdev, "Steam Controller '%s' disconnected",
steam->serial_no);
mutex_lock(&steam_devices_lock);
list_del(&steam->list);
list_del_init(&steam->list);
mutex_unlock(&steam_devices_lock);
steam->serial_no[0] = 0;
}
@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev,
mutex_init(&steam->mutex);
steam->quirks = id->driver_data;
INIT_WORK(&steam->work_connect, steam_work_connect_cb);
INIT_LIST_HEAD(&steam->list);
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {


@ -373,6 +373,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Mediacom FlexBook edge 13",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"),
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Odys Winbook 13",
.matches = {


@ -880,6 +880,8 @@ static int aspeed_create_fan(struct device *dev,
ret = of_property_read_u32(child, "reg", &pwm_port);
if (ret)
return ret;
if (pwm_port >= ARRAY_SIZE(pwm_port_params))
return -EINVAL;
aspeed_create_pwm_port(priv, (u8)pwm_port);
ret = of_property_count_u8_elems(child, "cooling-levels");


@ -364,6 +364,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id);
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
@ -408,11 +409,16 @@ static int adm1275_probe(struct i2c_client *client,
"Device mismatch: Configured %s, detected %s\n",
id->name, mid->name);
config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
mid->driver_data == adm1293 || mid->driver_data == adm1294)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
config = config_read_fn(client, ADM1275_PMON_CONFIG);
if (config < 0)
return config;
device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG);
if (device_config < 0)
return device_config;


@ -99,7 +99,7 @@ static enum hwmon_sensor_types scmi_types[] = {
[ENERGY] = hwmon_energy,
};
static u32 hwmon_attributes[] = {
static u32 hwmon_attributes[hwmon_max] = {
[hwmon_chip] = HWMON_C_REGISTER_TZ,
[hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
[hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
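
The hunk above sizes the attribute lookup table with the hwmon_max terminator so that any sensor type used as an index stays in bounds. As a rough illustration of the idea (not part of the patch; the enum and table names below are invented), sizing an array by its enum's terminator keeps designated initializers safe and leaves unlisted entries zeroed:

	enum sensor_kind { KIND_TEMP, KIND_VOLT, KIND_POWER, KIND_MAX };

	/* Sized by the terminator: KIND_POWER is a valid index even though
	 * it has no explicit initializer (it simply stays 0). */
	static const unsigned int attr_for_kind[KIND_MAX] = {
		[KIND_TEMP] = 0x1,
		[KIND_VOLT] = 0x2,
	};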


@ -863,6 +863,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
synchronize_irq(priv->irq);
priv->slave = NULL;
@ -966,6 +967,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {


@ -356,7 +356,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
vma = find_vma(mm, ib_umem_start(umem));
if (!vma || !is_vm_hugetlb_page(vma)) {
up_read(&mm->mmap_sem);
return -EINVAL;
ret_val = -EINVAL;
goto out_mm;
}
h = hstate_vma(vma);
umem->page_shift = huge_page_shift(h);


@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0093", /* T480 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN0099", /* X1 Extreme 1st */
"LEN009b", /* T580 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */


@ -4508,9 +4508,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
if (!fn)
return -ENOMEM;
iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
irq_domain_free_fwnode(fn);
if (!iommu->ir_domain)
if (!iommu->ir_domain) {
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
iommu->ir_domain->parent = arch_get_ir_parent_domain();
iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,


@ -536,8 +536,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
0, INTR_REMAP_TABLE_ENTRIES,
fn, &intel_ir_domain_ops,
iommu);
irq_domain_free_fwnode(fn);
if (!iommu->ir_domain) {
irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
goto out_free_bitmap;
}


@ -2068,7 +2068,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@ -2127,7 +2127,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
if (unlikely(dm_suspended(ic->ti)))
if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);


@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
@ -2529,6 +2530,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@ -2854,7 +2856,9 @@ retry:
if (r)
goto out_unlock;
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@ -2951,7 +2955,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@ -3112,6 +3118,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
static int dm_post_suspending_md(struct mapped_device *md)
{
return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@ -3128,6 +3139,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti)
{
return dm_post_suspending_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));


@ -4817,15 +4817,19 @@ int bond_create(struct net *net, const char *name)
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
rtnl_unlock();
return res;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
rtnl_unlock();
if (res < 0)
free_netdev(bond_dev);
return res;
return 0;
}
static int __net_init bond_net_init(struct net *net)


@ -451,11 +451,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
return err;
err = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
if (!err) {
struct bonding *bond = netdev_priv(bond_dev);
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
}


@ -1392,8 +1392,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
if (netif_running(dev))
if (netif_running(dev)) {
mutex_lock(&bp->link_lock);
rc = bnxt_hwrm_set_pause(bp);
mutex_unlock(&bp->link_lock);
}
return rc;
}


@ -215,7 +215,7 @@ io_error:
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
u16 v;
u16 v = 0;
__gm_phy_read(hw, port, reg, &v);
return v;
}


@ -587,7 +587,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
if (err)
return err;
goto err_trap_register;
err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
if (err)
@ -599,6 +599,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
}


@ -2074,8 +2074,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
DP_NOTICE(p_hwfn,
"Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
}


@ -2287,7 +2287,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
"power", 0, 0, 100);
if (ret)
return ret;
goto out_free_netdev;
/*
* Optional reset GPIO configured? Minimum 100 ns reset needed
@ -2296,7 +2296,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
"reset", 0, 0, 100);
if (ret)
return ret;
goto out_free_netdev;
/*
* Need to wait for optional EEPROM to load, max 750 us according


@ -1196,7 +1196,7 @@ static int ave_init(struct net_device *ndev)
ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
priv->pinmode_mask, priv->pinmode_val);
if (ret)
return ret;
goto out_reset_assert;
ave_global_reset(ndev);


@ -1248,7 +1248,7 @@ static int rr_open(struct net_device *dev)
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
pci_free_consistent(pdev, sizeof(struct ring_ctrl),
pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}


@ -1270,7 +1270,7 @@ static int adf7242_probe(struct spi_device *spi)
WQ_MEM_RECLAIM);
if (unlikely(!lp->wqueue)) {
ret = -ENOMEM;
goto err_hw_init;
goto err_alloc_wq;
}
ret = adf7242_hw_init(lp);
@ -1302,6 +1302,8 @@ static int adf7242_probe(struct spi_device *spi)
return ret;
err_hw_init:
destroy_workqueue(lp->wqueue);
err_alloc_wq:
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);


@ -1343,6 +1343,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V1;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
@ -1350,6 +1351,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V2;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@ -1357,6 +1359,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@ -1364,6 +1367,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;


@ -210,6 +210,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
ret = -EIO;
goto free;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);


@ -308,7 +308,6 @@ static void lapbeth_setup(struct net_device *dev)
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
dev->hard_header_len = 3;
dev->mtu = 1000;
dev->addr_len = 0;
}
@ -329,6 +328,14 @@ static int lapbeth_new_device(struct net_device *dev)
if (!ndev)
goto out;
/* When transmitting data:
* first this driver removes a pseudo header of 1 byte,
* then the lapb module prepends an LAPB header of at most 3 bytes,
* then this driver prepends a length field of 2 bytes,
* then the underlying Ethernet device prepends its own header.
*/
ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;


@ -643,9 +643,9 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
int ret;
if (!skb)
@ -685,14 +685,15 @@ resubmit:
return;
free:
kfree_skb(skb);
kfree(rx_buf);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
struct sk_buff *nskb;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
@ -732,11 +733,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
rx_buf->skb = nskb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, nskb, 1);
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
}
resubmit:
@ -750,6 +753,7 @@ resubmit:
return;
free:
kfree_skb(skb);
kfree(rx_buf);
urb->context = NULL;
}
@ -795,7 +799,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
init_usb_anchor(&hif_dev->mgmt_submitted);
for (i = 0; i < MAX_TX_URB_NUM; i++) {
tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
@ -832,8 +836,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
@ -841,6 +846,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
for (i = 0; i < MAX_RX_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -855,11 +866,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, skb);
ath9k_hif_usb_rx_cb, rx_buf);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
@ -885,6 +899,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@ -896,14 +912,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -918,11 +941,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, skb, 1);
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@ -948,6 +974,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}


@ -86,6 +86,11 @@ struct tx_buf {
struct list_head list;
};
struct rx_buf {
struct sk_buff *skb;
struct hif_device_usb *hif_dev;
};
#define HIF_USB_TX_STOP BIT(0)
#define HIF_USB_TX_FLUSH BIT(1)


@ -704,9 +704,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
x86_vector_domain);
irq_domain_free_fwnode(fn);
if (!vmd->irq_domain)
if (!vmd->irq_domain) {
irq_domain_free_fwnode(fn);
return -ENODEV;
}
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);


@ -256,7 +256,7 @@ static const struct amd_pingroup kerncz_groups[] = {
{
.name = "uart0",
.pins = uart0_pins,
.npins = 9,
.npins = 5,
},
{
.name = "uart1",


@ -352,7 +352,7 @@ store_spi_transport_##field(struct device *dev, \
struct spi_transport_attrs *tp \
= (struct spi_transport_attrs *)&starget->starget_data; \
\
if (i->f->set_##field) \
if (!i->f->set_##field) \
return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
if (val > tp->max_##field) \


@ -150,10 +150,10 @@ existing:
break;
}
ctrlr->dirty = (req->sleep_val != old_sleep_val ||
req->wake_val != old_wake_val) &&
req->sleep_val != UINT_MAX &&
req->wake_val != UINT_MAX;
ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
req->wake_val != old_wake_val) &&
req->sleep_val != UINT_MAX &&
req->wake_val != UINT_MAX;
unlock:
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);


@ -878,9 +878,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
trans_mode);
}
}
return IRQ_HANDLED;
}
return IRQ_HANDLED;
return IRQ_NONE;
}
static const struct of_device_id fsl_dspi_dt_ids[] = {


@ -41,7 +41,6 @@
#define SPI_CFG0_SCK_LOW_OFFSET 8
#define SPI_CFG0_CS_HOLD_OFFSET 16
#define SPI_CFG0_CS_SETUP_OFFSET 24
#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
@ -53,6 +52,8 @@
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
#define SPI_CFG2_SCK_HIGH_OFFSET 0
#define SPI_CFG2_SCK_LOW_OFFSET 16
#define SPI_CMD_ACT BIT(0)
#define SPI_CMD_RESUME BIT(1)
@ -259,7 +260,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
static void mtk_spi_prepare_transfer(struct spi_master *master,
struct spi_transfer *xfer)
{
u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
spi_clk_hz = clk_get_rate(mdata->spi_clk);
@ -272,18 +273,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
cs_time = sck_time * 2;
if (mdata->dev_comp->enhance_timing) {
reg_val = (((sck_time - 1) & 0xffff)
<< SPI_CFG2_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xffff)
<< SPI_CFG0_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xffff)
<< SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
<< SPI_CFG2_SCK_LOW_OFFSET);
writel(reg_val, mdata->base + SPI_CFG2_REG);
reg_val |= (((cs_time - 1) & 0xffff)
reg_val = (((cs_time - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val |= (((cs_time - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
writel(reg_val, mdata->base + SPI_CFG0_REG);
} else {
reg_val |= (((sck_time - 1) & 0xff)
reg_val = (((sck_time - 1) & 0xff)
<< SPI_CFG0_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);


@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct apci1032_private *devpriv = dev->private;
unsigned int shift, oldmask;
unsigned int shift, oldmask, himask, lomask;
switch (data[0]) {
case INSN_CONFIG_DIGITAL_TRIG:
if (data[1] != 0)
return -EINVAL;
shift = data[3];
oldmask = (1U << shift) - 1;
if (shift < 32) {
oldmask = (1U << shift) - 1;
himask = data[4] << shift;
lomask = data[5] << shift;
} else {
oldmask = 0xffffffffu;
himask = 0;
lomask = 0;
}
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
devpriv->ctrl = 0;
@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
devpriv->mode1 |= data[4] << shift;
devpriv->mode2 |= data[5] << shift;
devpriv->mode1 |= himask;
devpriv->mode2 |= lomask;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA |
@ -154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
devpriv->mode1 |= data[4] << shift;
devpriv->mode2 |= data[5] << shift;
devpriv->mode1 |= himask;
devpriv->mode2 |= lomask;
break;
default:
return -EINVAL;
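
The guard added above avoids shifting a 32-bit mask by 32 or more bits, which is undefined behaviour in C. A standalone sketch of the same pattern (names invented, not part of the driver):

	#include <stdint.h>

	static uint32_t channels_below(unsigned int shift)
	{
		if (shift < 32)
			return (1U << shift) - 1;  /* keep channels 0..shift-1 */
		return 0xffffffffu;                /* shift too large: keep them all */
	}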


@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
struct apci1500_private *devpriv = dev->private;
unsigned int trig = data[1];
unsigned int shift = data[3];
unsigned int hi_mask = data[4] << shift;
unsigned int lo_mask = data[5] << shift;
unsigned int chan_mask = hi_mask | lo_mask;
unsigned int old_mask = (1 << shift) - 1;
unsigned int hi_mask;
unsigned int lo_mask;
unsigned int chan_mask;
unsigned int old_mask;
unsigned int pm;
unsigned int pt;
unsigned int pp;
unsigned int invalid_chan;
if (trig > 1) {
dev_dbg(dev->class_dev,
@ -466,7 +467,20 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
return -EINVAL;
}
if (chan_mask > 0xffff) {
if (shift <= 16) {
hi_mask = data[4] << shift;
lo_mask = data[5] << shift;
old_mask = (1U << shift) - 1;
invalid_chan = (data[4] | data[5]) >> (16 - shift);
} else {
hi_mask = 0;
lo_mask = 0;
old_mask = 0xffff;
invalid_chan = data[4] | data[5];
}
chan_mask = hi_mask | lo_mask;
if (invalid_chan) {
dev_dbg(dev->class_dev, "invalid digital trigger channel\n");
return -EINVAL;
}


@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
unsigned int shift, oldmask;
unsigned int shift, oldmask, himask, lomask;
switch (data[0]) {
case INSN_CONFIG_DIGITAL_TRIG:
if (data[1] != 0)
return -EINVAL;
shift = data[3];
oldmask = (1U << shift) - 1;
if (shift < 32) {
oldmask = (1U << shift) - 1;
himask = data[4] << shift;
lomask = data[5] << shift;
} else {
oldmask = 0xffffffffu;
himask = 0;
lomask = 0;
}
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
devpriv->ctrl = 0;
@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
devpriv->mode1 |= data[4] << shift;
devpriv->mode2 |= data[5] << shift;
devpriv->mode1 |= himask;
devpriv->mode2 |= lomask;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA |
@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
devpriv->mode1 |= data[4] << shift;
devpriv->mode2 |= data[5] << shift;
devpriv->mode1 |= himask;
devpriv->mode2 |= lomask;
break;
default:
return -EINVAL;


@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev,
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
/* check shift amount */
shift = data[3];
if (shift >= s->n_chan) {
if (shift >= 32) {
mask = 0;
rising = 0;
falling = 0;


@ -61,11 +61,25 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *dev;
const struct usb_endpoint_descriptor *epd;
const struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct wlandevice *wlandev = NULL;
struct hfa384x *hw = NULL;
int result = 0;
if (iface_desc->desc.bNumEndpoints != 2) {
result = -ENODEV;
goto failed;
}
result = -EINVAL;
epd = &iface_desc->endpoint[1].desc;
if (!usb_endpoint_is_bulk_in(epd))
goto failed;
epd = &iface_desc->endpoint[2].desc;
if (!usb_endpoint_is_bulk_out(epd))
goto failed;
dev = interface_to_usbdev(interface);
wlandev = create_wlan();
if (!wlandev) {


@ -527,6 +527,7 @@ static void __init serial8250_isa_init_ports(void)
*/
up->mcr_mask = ~ALPHA_KLUDGE_MCR;
up->mcr_force = ALPHA_KLUDGE_MCR;
serial8250_set_defaults(up);
}
/* chain base port ops to support Remote Supervisor Adapter */
@ -550,7 +551,6 @@ static void __init serial8250_isa_init_ports(void)
port->membase = old_serial_port[i].iomem_base;
port->iotype = old_serial_port[i].io_type;
port->regshift = old_serial_port[i].iomem_reg_shift;
serial8250_set_defaults(up);
port->irqflags |= irqflag;
if (serial8250_isa_config != NULL)


@ -227,7 +227,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p)
* devices will export them as GPIOs, so we pre-configure them safely
* as inputs.
*/
u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00;
u8 dir = 0x00;
if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) &&
(pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) {
// Configure GPIO as inputs for Commtech adapters
dir = 0xff;
} else {
// Configure GPIO as outputs for SeaLevel adapters
dir = 0x00;
}
writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);


@ -36,8 +36,21 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int baud, quot;
/*
* Store the requested baud rate before calling the generic 8250
* set_termios method. Standard 8250 port expects bauds to be
* no higher than (uartclk / 16) so the baud will be clamped if it
* gets out of that bound. Mediatek 8250 port supports speed
* higher than that, therefore we'll get original baud rate back
* after calling the generic set_termios method and recalculate
* the speed later in this method.
*/
baud = tty_termios_baud_rate(termios);
serial8250_do_set_termios(port, termios, old);
tty_termios_encode_baud_rate(termios, baud, baud);
/*
* Mediatek UARTs use an extra highspeed register (UART_MTK_HIGHS)
*
@ -76,6 +89,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
*/
spin_lock_irqsave(&port->lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
/* set DLAB we have cval saved in up->lcr from the call to the core */
serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
serial_dl_write(up, quot);


@ -1095,10 +1095,19 @@ static const struct tty_port_operations vc_port_ops = {
.destruct = vc_port_destruct,
};
/*
* Change # of rows and columns (0 means unchanged/the size of fg_console)
* [this is to be used together with some user program
* like resize that changes the hardware videomode]
*/
#define VC_MAXCOL (32767)
#define VC_MAXROW (32767)
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
struct vt_notifier_param param;
struct vc_data *vc;
int err;
WARN_CONSOLE_UNLOCKED();
@ -1128,6 +1137,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
err = -EINVAL;
if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW ||
vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size)
goto err_free;
err = -ENOMEM;
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf)
goto err_free;
@ -1146,7 +1160,7 @@ err_free:
visual_deinit(vc);
kfree(vc);
vc_cons[currcons].d = NULL;
return -ENOMEM;
return err;
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
@ -1161,14 +1175,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
return err;
}
/*
* Change # of rows and columns (0 means unchanged/the size of fg_console)
* [this is to be used together with some user program
* like resize that changes the hardware videomode]
*/
#define VC_RESIZE_MAXCOL (32767)
#define VC_RESIZE_MAXROW (32767)
/**
* vc_do_resize - resizing method for the tty
* @tty: tty being resized
@ -1204,7 +1210,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
user = vc->vc_resize_user;
vc->vc_resize_user = 0;
if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW)
if (cols > VC_MAXCOL || lines > VC_MAXROW)
return -EINVAL;
new_cols = (cols ? cols : vc->vc_cols);
@ -1215,7 +1221,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
if (new_screen_size > KMALLOC_MAX_SIZE)
if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
return -EINVAL;
newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
@ -3371,6 +3377,7 @@ static int __init con_init(void)
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
tty_port_init(&vc->port);
visual_init(vc, currcons, 1);
/* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, vc->vc_rows, vc->vc_cols,
currcons || !vc->vc_sw->con_save_screen);


@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
if (num == 0) {
_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
if (!_req)
return -ENOMEM;
buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
if (!_req || !buf) {
/* possible _req freed by gr_probe via gr_remove */
if (!buf) {
gr_free_request(&ep->ep, _req);
return -ENOMEM;
}


@ -275,6 +275,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
if (is_fs_or_ls(speed) && !has_tt)
return false;
/* skip endpoint with zero maxpkt */
if (usb_endpoint_maxp(&ep->desc) == 0)
return false;
return true;
}


@ -231,6 +231,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1142)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x2142)
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)


@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
if (rw && !bottom_only) {
if ((int) rw > 0 && !bottom_only) {
region.dx = info->var.xoffset + rs;
region.dy = 0;
region.width = rw;
@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
if (bh) {
if ((int) bh > 0) {
region.dx = info->var.xoffset;
region.dy = info->var.yoffset + bs;
region.width = rs;
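
The casts added above let an underflowed margin width be recognised as negative instead of being treated as a huge unsigned value. A tiny demonstration of the effect (illustrative only; the variable names and sizes are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int fb_width = 1024, text_width = 1040;
		unsigned int rw = fb_width - text_width;  /* wraps to a huge value */

		printf("%u\n", rw);              /* ~4 billion, so "rw" looks non-zero */
		printf("%d\n", (int)rw > 0);     /* 0: the bogus fill is skipped */
		return 0;
	}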


@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
if (rw && !bottom_only) {
if ((int) rw > 0 && !bottom_only) {
region.dx = 0;
region.dy = info->var.yoffset;
region.height = rw;
@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
if (bh) {
if ((int) bh > 0) {
region.dx = info->var.xoffset + bs;
region.dy = 0;
region.height = info->var.yres_virtual;


@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
if (rw && !bottom_only) {
if ((int) rw > 0 && !bottom_only) {
region.dx = 0;
region.dy = info->var.yoffset + rs;
region.height = rw;
@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
if (bh) {
if ((int) bh > 0) {
region.dx = info->var.xoffset;
region.dy = info->var.yoffset;
region.height = info->var.yres;


@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
if (rw && !bottom_only) {
if ((int) rw > 0 && !bottom_only) {
region.dy = 0;
region.dx = info->var.xoffset;
region.width = rw;
@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
if (bh) {
if ((int) bh > 0) {
region.dy = info->var.yoffset;
region.dx = info->var.xoffset;
region.height = bh;


@ -1419,6 +1419,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
*roots = NULL;
return ret;
}
node = ulist_next(tmp, &uiter);


@ -1707,7 +1707,8 @@ static int __process_pages_contig(struct address_space *mapping,
if (!PageDirty(pages[i]) ||
pages[i]->mapping != mapping) {
unlock_page(pages[i]);
put_page(pages[i]);
for (; i < ret; i++)
put_page(pages[i]);
err = -EAGAIN;
goto out;
}


@ -6934,6 +6934,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
mutex_lock(&uuid_mutex);
mutex_lock(&fs_info->chunk_mutex);
/*
* It is possible for mount and umount to race in such a way that
* we execute this code path, but open_fs_devices failed to clear
* total_rw_bytes. We certainly want it cleared before reading the
* device items, so clear it here.
*/
fs_info->fs_devices->total_rw_bytes = 0;
/*
* Read all device items, and then all the chunk items. All
* device items are found before any chunk item (their object id


@ -1783,7 +1783,6 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
FILE_UNIX_BASIC_INFO *info_buf_target;
unsigned int xid;
int rc, tmprc;
bool new_target = d_really_is_negative(target_dentry);
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
@ -1860,13 +1859,8 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
*/
unlink_target:
/*
* If the target dentry was created during the rename, try
* unlinking it if it's not negative
*/
if (new_target &&
d_really_is_positive(target_dentry) &&
(rc == -EACCES || rc == -EEXIST)) {
/* Try unlinking the target dentry if it's not negative */
if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
if (d_is_dir(target_dentry))
tmprc = cifs_rmdir(target_dir, target_dentry);
else


@ -838,7 +838,8 @@ static int fuse_check_page(struct page *page)
1 << PG_uptodate |
1 << PG_lru |
1 << PG_active |
1 << PG_reclaim))) {
1 << PG_reclaim |
1 << PG_waiters))) {
printk(KERN_WARNING "fuse: trying to steal weird page\n");
printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
return 1;


@ -396,6 +396,8 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
struct inode *inode = dreq->inode;
inode_dio_end(inode);
if (dreq->iocb) {
long res = (long) dreq->error;
if (dreq->count != 0) {
@ -407,10 +409,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
complete(&dreq->completion);
- igrab(inode);
nfs_direct_req_release(dreq);
- inode_dio_end(inode);
- iput(inode);
}
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
@@ -540,10 +539,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- igrab(inode);
- nfs_direct_req_release(dreq);
inode_dio_end(inode);
- iput(inode);
+ nfs_direct_req_release(dreq);
return result < 0 ? result : -EIO;
}
@@ -960,10 +957,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- igrab(inode);
- nfs_direct_req_release(dreq);
inode_dio_end(inode);
- iput(inode);
+ nfs_direct_req_release(dreq);
return result < 0 ? result : -EIO;
}


@@ -82,7 +82,6 @@ nfs_file_release(struct inode *inode, struct file *filp)
dprintk("NFS: release(%pD2)\n", filp);
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
- inode_dio_wait(inode);
nfs_file_clear_open_context(filp);
return 0;
}


@@ -281,7 +281,8 @@
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data..page_aligned)
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
@@ -654,7 +655,9 @@
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
*(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
*(.dynbss) \
*(BSS_MAIN) \
*(COMMON) \


@@ -425,6 +425,7 @@ const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
+ int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,


@@ -120,9 +120,12 @@ io_mapping_init_wc(struct io_mapping *iomap,
resource_size_t base,
unsigned long size)
{
+ iomap->iomem = ioremap_wc(base, size);
+ if (!iomap->iomem)
+ return NULL;
iomap->base = base;
iomap->size = size;
- iomap->iomem = ioremap_wc(base, size);
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
#elif defined(pgprot_writecombine)


@@ -15,6 +15,7 @@ struct rt5670_platform_data {
int jd_mode;
bool in2_diff;
bool dev_gpio;
+ bool gpio1_is_ext_spk_en;
bool dmic_en;
unsigned int dmic1_data_pin;


@@ -795,10 +795,11 @@
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
- #define SW_HPHL_OVERCURRENT 0x10 /* set = over current on left hph */
- #define SW_HPHR_OVERCURRENT 0x11 /* set = over current on right hph */
- #define SW_MICROPHONE2_INSERT 0x12 /* set = inserted */
- #define SW_UNSUPPORT_INSERT 0x13 /* set = unsupported device inserted */
+ #define SW_MACHINE_COVER 0x10 /* set = cover closed */
+ #define SW_HPHL_OVERCURRENT 0x11 /* set = over current on left hph */
+ #define SW_HPHR_OVERCURRENT 0x12 /* set = over current on right hph */
+ #define SW_MICROPHONE2_INSERT 0x13 /* set = inserted */
+ #define SW_UNSUPPORT_INSERT 0x14 /* set = unsupported device inserted */
#define SW_MAX 0x20
#define SW_CNT (SW_MAX+1)


@@ -1897,7 +1897,7 @@ static void handle_swbp(struct pt_regs *regs)
if (!uprobe) {
if (is_swbp > 0) {
/* No matching uprobe; signal SIGTRAP. */
- send_sig(SIGTRAP, current, 0);
+ force_sig(SIGTRAP, current);
} else {
/*
* Either we raced with uprobe_unregister() or we can't


@@ -5147,7 +5147,6 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.to))
page_counter_uncharge(&mc.to->memory, mc.moved_swap);
- mem_cgroup_id_get_many(mc.to, mc.moved_swap);
css_put_many(&mc.to->css, mc.moved_swap);
mc.moved_swap = 0;
@@ -5338,7 +5337,8 @@ put: /* get_mctgt_type() gets the page */
ent = target.ent;
if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
mc.precharge--;
- /* we fixup refcnts and charges later. */
+ mem_cgroup_id_get_many(mc.to, 1);
+ /* we fixup other refcnts and charges later. */
mc.moved_swap++;
}
break;


@@ -130,6 +130,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
#ifdef CONFIG_MEMCG_KMEM
LIST_HEAD(slab_root_caches);
+ static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
void slab_init_memcg_params(struct kmem_cache *s)
{
@@ -310,6 +311,14 @@ int slab_unmergeable(struct kmem_cache *s)
if (s->refcount < 0)
return 1;
+ #ifdef CONFIG_MEMCG_KMEM
+ /*
+ * Skip the dying kmem_cache.
+ */
+ if (s->memcg_params.dying)
+ return 1;
+ #endif
return 0;
}
@@ -717,14 +726,22 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
WARN_ON_ONCE(s->memcg_params.deact_fn))
return;
+ /*
+ * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
+ * flag and make sure that no new kmem_cache deactivation tasks
+ * are queued (see flush_memcg_workqueue() ).
+ */
+ spin_lock_irq(&memcg_kmem_wq_lock);
if (s->memcg_params.root_cache->memcg_params.dying)
- return;
+ goto unlock;
/* pin memcg so that @s doesn't get destroyed in the middle */
css_get(&s->memcg_params.memcg->css);
s->memcg_params.deact_fn = deact_fn;
call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+ unlock:
+ spin_unlock_irq(&memcg_kmem_wq_lock);
}
void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -832,12 +849,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
return 0;
}
+ static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
+ {
+ spin_lock_irq(&memcg_kmem_wq_lock);
+ s->memcg_params.dying = true;
+ spin_unlock_irq(&memcg_kmem_wq_lock);
+ }
static void flush_memcg_workqueue(struct kmem_cache *s)
{
- mutex_lock(&slab_mutex);
- s->memcg_params.dying = true;
- mutex_unlock(&slab_mutex);
/*
* SLUB deactivates the kmem_caches through call_rcu_sched. Make
* sure all registered rcu callbacks have been invoked.
@@ -858,10 +878,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
return 0;
}
- static inline void flush_memcg_workqueue(struct kmem_cache *s)
- {
- }
#endif /* CONFIG_MEMCG_KMEM */
void slab_kmem_cache_release(struct kmem_cache *s)
@@ -879,8 +895,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (unlikely(!s))
return;
- flush_memcg_workqueue(s);
get_online_cpus();
get_online_mems();
@@ -890,6 +904,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->refcount)
goto out_unlock;
+ #ifdef CONFIG_MEMCG_KMEM
+ memcg_set_kmem_cache_dying(s);
+ mutex_unlock(&slab_mutex);
+ put_online_mems();
+ put_online_cpus();
+ flush_memcg_workqueue(s);
+ get_online_cpus();
+ get_online_mems();
+ mutex_lock(&slab_mutex);
+ #endif
err = shutdown_memcg_caches(s);
if (!err)
err = shutdown_cache(s);
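
The slab_common.c hunks above close a race between kmem_cache_destroy() and deferred memcg cache deactivation by introducing a "dying" flag that is set and tested under a dedicated spinlock: the destroy path marks the cache dying and only then flushes pending work, while the deactivation path refuses to queue new work once the flag is set. A minimal sketch of that pattern, using hypothetical names rather than the kernel's real slab structures:

/*
 * Illustrative sketch only: "cache_like", queue_deactivation() and
 * start_destroy() are hypothetical; only the locking idiom mirrors
 * the patch above.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(deferred_lock);

struct cache_like {
	bool dying;
	struct work_struct deact_work;
};

/* Deactivation side: only queue work while the cache is still alive. */
static void queue_deactivation(struct cache_like *c)
{
	spin_lock_irq(&deferred_lock);
	if (c->dying)
		goto unlock;		/* destroy already started */
	schedule_work(&c->deact_work);
unlock:
	spin_unlock_irq(&deferred_lock);
}

/* Destroy side: mark dying first, then flush anything already queued. */
static void start_destroy(struct cache_like *c)
{
	spin_lock_irq(&deferred_lock);
	c->dying = true;
	spin_unlock_irq(&deferred_lock);

	flush_work(&c->deact_work);	/* no new work can be queued now */
}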


@@ -2230,6 +2230,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -2240,6 +2241,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
if (status->flag & RX_FLAG_DECRYPTED)
return 0;
+ /* check mesh EAPOL frames first */
+ if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+ ieee80211_is_data(fc))) {
+ struct ieee80211s_hdr *mesh_hdr;
+ u16 hdr_len = ieee80211_hdrlen(fc);
+ u16 ethertype_offset;
+ __be16 ethertype;
+ if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+ goto drop_check;
+ /* make sure fixed part of mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb, hdr_len + 6))
+ goto drop_check;
+ mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+ ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+ sizeof(rfc1042_header);
+ if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+ ethertype == rx->sdata->control_port_protocol)
+ return 0;
+ }
+ drop_check:
/* Drop unencrypted frames if key is set. */
if (unlikely(!ieee80211_has_protected(fc) &&
!ieee80211_is_any_nullfunc(fc) &&


@@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
struct netns_ipvs *ipvs = tinfo->ipvs;
+ struct sock *sk = tinfo->sock->sk;
+ struct udp_sock *up = udp_sk(sk);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
@@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data)
ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
while (!kthread_should_stop()) {
- wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
- !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
- || kthread_should_stop());
+ wait_event_interruptible(*sk_sleep(sk),
+ !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue) ||
+ kthread_should_stop());
/* do we have data now? */
- while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
+ while (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue)) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
ipvs->bcfg.sync_maxlen);
if (len <= 0) {


@@ -181,7 +181,7 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
}
/* We have to transmit across all bearers */
- skb_queue_head_init(&_xmitq);
+ __skb_queue_head_init(&_xmitq);
for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
if (!bb->dests[bearer_id])
continue;
@@ -237,7 +237,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
struct sk_buff_head xmitq;
int rc = 0;
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
if (tipc_link_bc_peers(l))
rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -267,7 +267,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
u32 dnode, selector;
selector = msg_link_selector(buf_msg(skb_peek(pkts)));
- skb_queue_head_init(&_pkts);
+ __skb_queue_head_init(&_pkts);
list_for_each_entry_safe(dst, tmp, &dests->list, list) {
dnode = dst->node;
@@ -299,7 +299,7 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
int rc = 0;
skb_queue_head_init(&inputq);
- skb_queue_head_init(&localq);
+ __skb_queue_head_init(&localq);
/* Clone packets before they are consumed by next call */
if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {


@@ -199,7 +199,7 @@ void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
struct tipc_member *m, *tmp;
struct sk_buff_head xmitq;
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
tipc_group_update_member(m, 0);
@@ -435,7 +435,7 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
return true;
if (state == MBR_PENDING && adv == ADV_IDLE)
return true;
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
tipc_node_distr_xmit(grp->net, &xmitq);
return true;


@@ -928,7 +928,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
int rc = 0;
if (unlikely(msg_size(hdr) > mtu)) {
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EMSGSIZE;
}
@@ -957,7 +957,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
if (likely(skb_queue_len(transmq) < maxwin)) {
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb) {
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -ENOBUFS;
}
__skb_dequeue(list);
@@ -1429,7 +1429,7 @@ void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
struct sk_buff *skb;
u32 dnode = l->addr;
- skb_queue_head_init(&tnlq);
+ __skb_queue_head_init(&tnlq);
skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
INT_H_SIZE, BASIC_H_SIZE,
dnode, onode, 0, 0, 0);
@@ -1465,8 +1465,8 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
if (!tnl)
return;
- skb_queue_head_init(&tnlq);
- skb_queue_head_init(&tmpxq);
+ __skb_queue_head_init(&tnlq);
+ __skb_queue_head_init(&tmpxq);
/* At least one packet required for safe algorithm => add dummy */
skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
@@ -1476,7 +1476,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
pr_warn("%sunable to create tunnel packet\n", link_co_err);
return;
}
- skb_queue_tail(&tnlq, skb);
+ __skb_queue_tail(&tnlq, skb);
tipc_link_xmit(l, &tnlq, &tmpxq);
__skb_queue_purge(&tmpxq);


@@ -1368,13 +1368,14 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
int rc;
if (in_own_node(net, dnode)) {
+ spin_lock_init(&list->lock);
tipc_sk_rcv(net, list);
return 0;
}
n = tipc_node_find(net, dnode);
if (unlikely(!n)) {
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
@@ -1383,7 +1384,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
if (unlikely(bearer_id == INVALID_BEARER_ID)) {
tipc_node_read_unlock(n);
tipc_node_put(n);
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
@@ -1415,7 +1416,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
{
struct sk_buff_head head;
- skb_queue_head_init(&head);
+ __skb_queue_head_init(&head);
__skb_queue_tail(&head, skb);
tipc_node_xmit(net, &head, dnode, selector);
return 0;


@@ -800,7 +800,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
msg_set_nameupper(hdr, seq->upper);
/* Build message as chain of buffers */
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
/* Send message if build was successful */
@@ -841,7 +841,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
/* Build message as chain of buffers */
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
@@ -1046,7 +1046,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
msg_set_grp_bc_ack_req(hdr, ack);
/* Build message as chain of buffers */
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
@@ -1372,7 +1372,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
if (unlikely(rc))
return rc;
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
@@ -1427,7 +1427,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
int send, sent = 0;
int rc = 0;
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
if (unlikely(dlen > INT_MAX))
return -EMSGSIZE;
@@ -1782,7 +1782,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
/* Send group flow control advertisement when applicable */
if (tsk->group && msg_in_group(hdr) && !grp_evt) {
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
msg_orignode(hdr), msg_origport(hdr),
&xmitq);
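
The tipc conversions above all follow one rule: an sk_buff_head that lives on the stack and is never touched concurrently does not need its spinlock initialized or taken, so it is set up with __skb_queue_head_init() and handled with the lockless __skb_queue_*() helpers, while the one list handed to tipc_sk_rcv() instead gets its lock initialized explicitly. A minimal sketch of the idiom, with a hypothetical function name:

/* Illustrative sketch; send_batch() is hypothetical, the idiom is the point. */
#include <linux/skbuff.h>

static void send_batch(struct sk_buff *skb)
{
	struct sk_buff_head pkts;

	__skb_queue_head_init(&pkts);	/* list head only, no spin_lock_init() */
	__skb_queue_tail(&pkts, skb);	/* lockless: the queue is private here */

	/* ... pass &pkts to a consumer that takes ownership of the skbs ... */

	__skb_queue_purge(&pkts);	/* lockless cleanup of anything left over */
}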


@@ -77,8 +77,8 @@ parse_symbol() {
return
fi
- # Strip out the base of the path
- code=${code#$basepath/}
+ # Strip out the base of the path on each line
+ code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code")
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}


@@ -96,7 +96,7 @@ lx-symbols command."""
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
- attrs[n]['name'].string(): attrs[n]['address']
+ attrs[n]['battr']['attr']['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
