This is the 4.19.116 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl6ZbYYACgkQONu9yGCS
 aT76ohAAn4lIjSuMRCILy/lq0DXVWDy7q6YdfyzNBITxc86tVfnfjMeQxUBviE/1
 OzShWgMRXeqrb0xJTJ5Rv6mt5Kf9a3DpPWt2jwo1iqWkl4AihDtDV7Z2Bh+QdnSX
 +lQ1xGPqDi4MMgoYlpMtlFc3wq/pJV0i8Q7amXC/KbsDkt5dlDrQYeEZHe2P7pR9
 ZljKLHEdGRE3uGqXmEM8qb6aLjQudnHmH/9uChP4UX6b+ZADDCc05DMhEkhEoCZT
 jdxiqVZvRdiiXTc1r6ckGv0xae77s0IAAZMQAd+24zFK94QByi6d9Cw0y6qyyDi7
 1rfHIWSjvetY3+4DCQDOu/k2/pLt/Vqh9zuvtaf8Tu8cKM9rxow0Hl9FlL3fZpBN
 btpqeCY6twFxApHoAp9ZDK6otaVEOtbg1MCsmpUbVxWIF9IR8cPqMGyYK3lR2Ao1
 HgdKEFkYOycAOu51ujuHsDLx/9k2ZqeSPyh0yrdVpFUVvMV/YqoYP9X3jzGRVllL
 hgYfFcywgrVgxK4c02/6cPiJNbFskTpLllDPVVXGIjO+9R4vTRUgJ74CNrqL25aT
 ioSFWJA00UvXObnbCDdA+otYYWAmYOJX7HVvEieb0oDqPYHZHa1UW6+1WlYSAQLm
 WAsHiejOv6PwzRmCDI6RyuZKQjjX6bppAWFq0/RLPO0uEqjXlxc=
 =Iq3k
 -----END PGP SIGNATURE-----

Merge 4.19.116 into android-4.19

Changes in 4.19.116
	ARM: dts: sun8i-a83t-tbs-a711: HM5065 doesn't like such a high voltage
	bus: sunxi-rsb: Return correct data when mixing 16-bit and 8-bit reads
	net: vxge: fix wrong __VA_ARGS__ usage
	hinic: fix a bug of waitting for IO stopped
	hinic: fix wrong para of wait_for_completion_timeout
	cxgb4/ptp: pass the sign of offset delta in FW CMD
	qlcnic: Fix bad kzalloc null test
	i2c: st: fix missing struct parameter description
	cpufreq: imx6q: Fixes unwanted cpu overclocking on i.MX6ULL
	media: venus: hfi_parser: Ignore HEVC encoding for V1
	firmware: arm_sdei: fix double-lock on hibernate with shared events
	null_blk: Fix the null_add_dev() error path
	null_blk: Handle null_add_dev() failures properly
	null_blk: fix spurious IO errors after failed past-wp access
	xhci: bail out early if driver can't accress host in resume
	x86: Don't let pgprot_modify() change the page encryption bit
	block: keep bdi->io_pages in sync with max_sectors_kb for stacked devices
	irqchip/versatile-fpga: Handle chained IRQs properly
	sched: Avoid scale real weight down to zero
	selftests/x86/ptrace_syscall_32: Fix no-vDSO segfault
	PCI/switchtec: Fix init_completion race condition with poll_wait()
	media: i2c: video-i2c: fix build errors due to 'imply hwmon'
	libata: Remove extra scsi_host_put() in ata_scsi_add_hosts()
	pstore/platform: fix potential mem leak if pstore_init_fs failed
	gfs2: Don't demote a glock until its revokes are written
	x86/boot: Use unsigned comparison for addresses
	efi/x86: Ignore the memory attributes table on i386
	genirq/irqdomain: Check pointer in irq_domain_alloc_irqs_hierarchy()
	block: Fix use-after-free issue accessing struct io_cq
	media: i2c: ov5695: Fix power on and off sequences
	usb: dwc3: core: add support for disabling SS instances in park mode
	irqchip/gic-v4: Provide irq_retrigger to avoid circular locking dependency
	md: check arrays is suspended in mddev_detach before call quiesce operations
	firmware: fix a double abort case with fw_load_sysfs_fallback
	locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()
	block, bfq: fix use-after-free in bfq_idle_slice_timer_body
	btrfs: qgroup: ensure qgroup_rescan_running is only set when the worker is at least queued
	btrfs: remove a BUG_ON() from merge_reloc_roots()
	btrfs: track reloc roots based on their commit root bytenr
	IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
	uapi: rename ext2_swab() to swab() and share globally in swab.h
	slub: improve bit diffusion for freelist ptr obfuscation
	ASoC: fix regwmask
	ASoC: dapm: connect virtual mux with default value
	ASoC: dpcm: allow start or stop during pause for backend
	ASoC: topology: use name_prefix for new kcontrol
	usb: gadget: f_fs: Fix use after free issue as part of queue failure
	usb: gadget: composite: Inform controller driver of self-powered
	ALSA: usb-audio: Add mixer workaround for TRX40 and co
	ALSA: hda: Add driver blacklist
	ALSA: hda: Fix potential access overflow in beep helper
	ALSA: ice1724: Fix invalid access for enumerated ctl items
	ALSA: pcm: oss: Fix regression by buffer overflow fix
	ALSA: doc: Document PC Beep Hidden Register on Realtek ALC256
	ALSA: hda/realtek - Set principled PC Beep configuration for ALC256
	ALSA: hda/realtek - Remove now-unnecessary XPS 13 headphone noise fixups
	ALSA: hda/realtek - Add quirk for MSI GL63
	media: ti-vpe: cal: fix disable_irqs to only the intended target
	acpi/x86: ignore unspecified bit positions in the ACPI global lock field
	thermal: devfreq_cooling: inline all stubs for CONFIG_DEVFREQ_THERMAL=n
	nvme-fc: Revert "add module to ops template to allow module references"
	nvme: Treat discovery subsystems as unique subsystems
	PCI: pciehp: Fix indefinite wait on sysfs requests
	PCI/ASPM: Clear the correct bits when enabling L1 substates
	PCI: Add boot interrupt quirk mechanism for Xeon chipsets
	PCI: endpoint: Fix for concurrent memory allocation in OB address region
	tpm: Don't make log failures fatal
	tpm: tpm1_bios_measurements_next should increase position index
	tpm: tpm2_bios_measurements_next should increase position index
	KEYS: reaching the keys quotas correctly
	irqchip/versatile-fpga: Apply clear-mask earlier
	pstore: pstore_ftrace_seq_next should increase position index
	MIPS/tlbex: Fix LDDIR usage in setup_pw() for Loongson-3
	MIPS: OCTEON: irq: Fix potential NULL pointer dereference
	ath9k: Handle txpower changes even when TPC is disabled
	signal: Extend exec_id to 64bits
	x86/entry/32: Add missing ASM_CLAC to general_protection entry
	KVM: nVMX: Properly handle userspace interrupt window request
	KVM: s390: vsie: Fix region 1 ASCE sanity shadow address checks
	KVM: s390: vsie: Fix delivery of addressing exceptions
	KVM: x86: Allocate new rmap and large page tracking when moving memslot
	KVM: VMX: Always VMCLEAR in-use VMCSes during crash with kexec support
	KVM: x86: Gracefully handle __vmalloc() failure during VM allocation
	KVM: VMX: fix crash cleanup when KVM wasn't used
	CIFS: Fix bug which the return value by asynchronous read is error
	mtd: spinand: Stop using spinand->oobbuf for buffering bad block markers
	mtd: spinand: Do not erase the block before writing a bad block marker
	Btrfs: fix crash during unmount due to race with delayed inode workers
	btrfs: set update the uuid generation as soon as possible
	btrfs: drop block from cache on error in relocation
	btrfs: fix missing file extent item for hole after ranged fsync
	btrfs: fix missing semaphore unlock in btrfs_sync_file
	crypto: mxs-dcp - fix scatterlist linearization for hash
	erofs: correct the remaining shrink objects
	powerpc/pseries: Drop pointless static qualifier in vpa_debugfs_init()
	x86/speculation: Remove redundant arch_smt_update() invocation
	tools: gpio: Fix out-of-tree build regression
	mm: Use fixed constant in page_frag_alloc instead of size + 1
	net: qualcomm: rmnet: Allow configuration updates to existing devices
	arm64: dts: allwinner: h6: Fix PMU compatible
	dm writecache: add cond_resched to avoid CPU hangs
	dm verity fec: fix memory leak in verity_fec_dtr
	scsi: zfcp: fix missing erp_lock in port recovery trigger for point-to-point
	arm64: armv8_deprecated: Fix undef_hook mask for thumb setend
	selftests: vm: drop dependencies on page flags from mlock2 tests
	rtc: omap: Use define directive for PIN_CONFIG_ACTIVE_HIGH
	drm/etnaviv: rework perfmon query infrastructure
	powerpc/pseries: Avoid NULL pointer dereference when drmem is unavailable
	NFS: Fix a page leak in nfs_destroy_unlinked_subrequests()
	ext4: fix a data race at inode->i_blocks
	fs/filesystems.c: downgrade user-reachable WARN_ONCE() to pr_warn_once()
	ocfs2: no need try to truncate file beyond i_size
	perf tools: Support Python 3.8+ in Makefile
	s390/diag: fix display of diagnose call statistics
	Input: i8042 - add Acer Aspire 5738z to nomux list
	clk: ingenic/jz4770: Exit with error if CGU init failed
	kmod: make request_module() return an error when autoloading is disabled
	cpufreq: powernv: Fix use-after-free
	hfsplus: fix crash and filesystem corruption when deleting files
	libata: Return correct status in sata_pmp_eh_recover_pm() when ATA_DFLAG_DETACH is set
	ipmi: fix hung processes in __get_guid()
	xen/blkfront: fix memory allocation flags in blkfront_setup_indirect()
	powerpc/powernv/idle: Restore AMR/UAMOR/AMOR after idle
	powerpc/64/tm: Don't let userspace set regs->trap via sigreturn
	powerpc/hash64/devmap: Use H_PAGE_THP_HUGE when setting up huge devmap PTE entries
	powerpc/xive: Use XIVE_BAD_IRQ instead of zero to catch non configured IPIs
	powerpc/kprobes: Ignore traps that happened in real mode
	scsi: mpt3sas: Fix kernel panic observed on soft HBA unplug
	powerpc: Add attributes for setjmp/longjmp
	powerpc: Make setjmp/longjmp signature standard
	btrfs: use nofs allocations for running delayed items
	dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
	crypto: caam - update xts sector size for large input length
	crypto: ccree - improve error handling
	crypto: ccree - zero out internal struct before use
	crypto: ccree - don't mangle the request assoclen
	crypto: ccree - dec auth tag size from cryptlen map
	crypto: ccree - only try to map auth tag if needed
	Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
	drm/dp_mst: Fix clearing payload state on topology disable
	drm: Remove PageReserved manipulation from drm_pci_alloc
	ftrace/kprobe: Show the maxactive number on kprobe_events
	powerpc/fsl_booke: Avoid creating duplicate tlb1 entry
	misc: echo: Remove unnecessary parentheses and simplify check for zero
	etnaviv: perfmon: fix total and idle HI cyleces readout
	mfd: dln2: Fix sanity checking for endpoints
	efi/x86: Fix the deletion of variables in mixed mode
	Linux 4.19.116

Change-Id: If09fbb53fcb11ea01eaaa7fee7ed21ed6234f352
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman committed 2020-04-18 13:33:51 +02:00 (commit 95bff4cdab)
163 changed files with 1218 additions and 831 deletions


@ -8,3 +8,4 @@ HD-Audio
models
controls
dp-mst
+realtek-pc-beep


@ -216,8 +216,6 @@ alc298-dell-aio
ALC298 fixups on Dell AIO machines
alc275-dell-xps
ALC275 fixups on Dell XPS models
-alc256-dell-xps13
-ALC256 fixups on Dell XPS13
lenovo-spk-noise
Workaround for speaker noise on Lenovo machines
lenovo-hotkey


@ -0,0 +1,129 @@
===============================
Realtek PC Beep Hidden Register
===============================
This file documents the "PC Beep Hidden Register", which is present in certain
Realtek HDA codecs and controls a muxer and pair of passthrough mixers that can
route audio between pins but aren't themselves exposed as HDA widgets. As far
as I can tell, these hidden routes are designed to allow flexible PC Beep output
for codecs that don't have mixer widgets in their output paths. Why it's easier
to hide a mixer behind an undocumented vendor register than to just expose it
as a widget, I have no idea.
Register Description
====================
The register is accessed via processing coefficient 0x36 on NID 20h. Bits not
identified below have no discernible effect on my machine, a Dell XPS 13 9350::
MSB                           LSB
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |h|S|L|         | B |R|       | Known bits
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
|0|0|1|1|  0x7  |0|0x0|1|  0x7  | Reset value
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1Ah input select (B): 2 bits
When zero, expose the PC Beep line (from the internal beep generator, when
enabled with the Set Beep Generation verb on NID 01h, or else from the
external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone
jack (or possibly Line In on some machines) input instead. If PC Beep is
selected, the 1Ah boost control has no effect.
Amplify 1Ah loopback, left (L): 1 bit
Amplify the left channel of 1Ah before mixing it into outputs as specified
by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
Amplify 1Ah loopback, right (R): 1 bit
Amplify the right channel of 1Ah before mixing it into outputs as specified
by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
Loopback 1Ah to 21h [active low] (h): 1 bit
When zero, mix 1Ah (possibly with amplification, depending on L and R bits)
into 21h (headphone jack on my machine). Mixed signal respects the mute
setting on 21h.
Loopback 1Ah to 14h (S): 1 bit
When one, mix 1Ah (possibly with amplification, depending on L and R bits)
into 14h (internal speaker on my machine). Mixed signal **ignores** the mute
setting on 14h and is present whenever 14h is configured as an output.
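As a worked example (derived only from the table and bit descriptions above,
reading the table's columns as bits 15 down to 0 — not a value taken from any
driver), a setting that keeps the unidentified fields at their reset values
but selects the headphone jack on 1Ah and disables both loopbacks and all
amplification would be assembled like this::

 bit 15        = 0    unidentified, reset value
 h  (bit 14)   = 1    1Ah-to-21h loopback off (active low)
 S  (bit 13)   = 0    1Ah-to-14h loopback off
 L  (bit 12)   = 0    no left-channel amplification
 bits 11-8     = 0x7  unidentified, reset value
 bit 7         = 0    unidentified, reset value
 B  (bits 6-5) = 0x1  headphone jack (or Line In) input
 R  (bit 4)    = 0    no right-channel amplification
 bits 3-0      = 0x7  unidentified, reset value

 => 0100 0111 0010 0111 = 0x4727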
Path diagrams
=============
1Ah input selection (DIV is the PC Beep divider set on NID 01h)::

 <Beep generator>   <PCBEEP pin>   <Headphone jack>
        |                |                |
        +--DIV--+--!DIV--+       {1Ah boost control}
                |                         |
                +--(b == 0)--+--(b != 0)--+
                             |
            >1Ah (Beep/Headphone Mic/Line In)<

Loopback of 1Ah to 21h/14h::

     <1Ah (Beep/Headphone Mic/Line In)>
                      |
              {amplify if L/R}
                      |
         +-----!h-----+-----S-----+
         |                        |
 {21h mute control}               |
         |                        |
 >21h (Headphone)<    >14h (Internal Speaker)<
Background
==========
All Realtek HDA codecs have a vendor-defined widget with node ID 20h which
provides access to a bank of registers that control various codec functions.
Registers are read and written via the standard HDA processing coefficient
verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is
named "Realtek Vendor Registers" in public datasheets' verb listings and,
apart from that, is entirely undocumented.
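For quick experimentation from userspace, the register can be read with
``hda-verb`` from alsa-tools using the standard verb encodings (Set
Coefficient Index is 0x500, Get Processing Coefficient is 0xc00). This is an
untested sketch; the hwdep device node and codec address on your machine may
differ::

 # select coefficient 0x36 on the vendor widget (NID 0x20)
 hda-verb /dev/snd/hwC0D0 0x20 0x500 0x36
 # read the selected coefficient back; expect 0x3717 after a cold boot
 hda-verb /dev/snd/hwC0D0 0x20 0xc00 0x0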
This particular register, exposed at coefficient 0x36 and named in commits from
Realtek, is of note: unlike most registers, which seem to control detailed
amplifier parameters not in scope of the HDA specification, it controls audio
routing which could just as easily have been defined using standard HDA mixer
and selector widgets.
Specifically, it selects between two sources for the input pin widget with Node
ID (NID) 1Ah: the widget's signal can come either from an audio jack (on my
laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in Realtek
commits indicate that it might be a Line In on some machines) or from the PC
Beep line (which is itself multiplexed between the codec's internal beep
generator and external PCBEEP pin, depending on if the beep generator is
enabled via verbs on NID 01h). Additionally, it can mix (with optional
amplification) that signal onto the 21h and/or 14h output pins.
The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is
then amplified and mixed into both the headphones and the speakers. Not only
does this violate the HDA specification, which says that "[a vendor defined
beep input pin] connection may be maintained *only* while the Link reset
(**RST#**) is asserted", it means that we cannot ignore the register if we care
about the input that 1Ah would otherwise expose or if the PCBEEP trace is
poorly shielded and picks up chassis noise (both of which are the case on my
machine).
Unfortunately, there are lots of ways to get this register configuration wrong.
Linux, it seems, has gone through most of them. For one, the register resets
after S3 suspend: judging by existing code, this isn't the case for all vendor
registers, and it's led to some fixes that improve behavior on cold boot but
don't last after suspend. Other fixes have successfully switched the 1Ah input
away from PC Beep but have failed to disable both loopback paths. On my
machine, this means that the headphone input is amplified and looped back to
the headphone output, which uses the exact same pins! As you might expect, this
causes terrible headphone noise, the character of which is controlled by the
1Ah boost control. (If you've seen instructions online to fix XPS 13 headphone
noise by changing "Headphone Mic Boost" in ALSA, now you know why.)
The information here has been obtained through black-box reverse engineering of
the ALC256 codec's behavior and is not guaranteed to be correct. It likely
also applies for the ALC255, ALC257, ALC235, and ALC236, since those codecs
seem to be close relatives of the ALC256. (They all share one initialization
function.) Additionally, other codecs like the ALC225 and ALC285 also have this
register, judging by existing fixups in ``patch_realtek.c``, but specific
data (e.g. node IDs, bit positions, pin mappings) for those codecs may differ
from what I've described here.


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 115
+SUBLEVEL = 116
EXTRAVERSION =
NAME = "People's Front"


@ -318,8 +318,8 @@
};
&reg_dldo3 {
-regulator-min-microvolt = <2800000>;
-regulator-max-microvolt = <2800000>;
+regulator-min-microvolt = <1800000>;
+regulator-max-microvolt = <1800000>;
regulator-name = "vdd-csi";
};


@ -70,8 +70,7 @@
};
pmu {
compatible = "arm,cortex-a53-pmu",
"arm,armv8-pmuv3";
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,


@ -604,7 +604,7 @@ static struct undef_hook setend_hooks[] = {
},
{
/* Thumb mode */
-.instr_mask = 0x0000fff7,
+.instr_mask = 0xfffffff7,
.instr_val = 0x0000b650,
.pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
.pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),


@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d,
}
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+if (!cd)
+return -ENOMEM;
cd->host_data = host_data;
cd->bit = hw;


@ -1479,6 +1479,7 @@ static void build_r4000_tlb_refill_handler(void)
static void setup_pw(void)
{
+unsigned int pwctl;
unsigned long pgd_i, pgd_w;
#ifndef __PAGETABLE_PMD_FOLDED
unsigned long pmd_i, pmd_w;
@ -1505,6 +1506,7 @@ static void setup_pw(void)
pte_i = ilog2(_PAGE_GLOBAL);
pte_w = 0;
+pwctl = 1 << 30; /* Set PWDirExt */
#ifndef __PAGETABLE_PMD_FOLDED
write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
@ -1515,8 +1517,9 @@ static void setup_pw(void)
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-write_c0_pwctl(1 << 6 | psn);
+pwctl |= (1 << 6 | psn);
#endif
+write_c0_pwctl(pwctl);
write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}


@ -145,6 +145,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int hash__has_transparent_hugepage(void);
#endif
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+BUG();
+return pmd;
+}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */


@ -233,7 +233,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
*/
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
-return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
+return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
(_PAGE_PTE | H_PAGE_THP_HUGE));
}
@ -259,6 +259,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */


@ -1253,7 +1253,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm);
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
-return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+if (radix_enabled())
+return radix__pmd_mkdevmap(pmd);
+return hash__pmd_mkdevmap(pmd);
}
static inline int pmd_devmap(pmd_t pmd)


@ -255,6 +255,11 @@ extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int radix__has_transparent_hugepage(void);
#endif
+static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
+{
+return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);


@ -28,12 +28,12 @@ struct drmem_lmb_info {
extern struct drmem_lmb_info *drmem_info;
#define for_each_drmem_lmb_in_range(lmb, start, end) \
-for ((lmb) = (start); (lmb) <= (end); (lmb)++)
+for ((lmb) = (start); (lmb) < (end); (lmb)++)
#define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \
&drmem_info->lmbs[0], \
-&drmem_info->lmbs[drmem_info->n_lmbs - 1])
+&drmem_info->lmbs[drmem_info->n_lmbs])
/*
* The of_drconf_cell_v1 struct defines the layout of the LMB data


@ -12,7 +12,9 @@
#define JMP_BUF_LEN 23
-extern long setjmp(long *);
-extern void longjmp(long *, long);
+typedef long jmp_buf[JMP_BUF_LEN];
+extern int setjmp(jmp_buf env) __attribute__((returns_twice));
+extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
#endif /* _ASM_POWERPC_SETJMP_H */


@ -5,9 +5,6 @@
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-# Avoid clang warnings around longjmp/setjmp declarations
-CFLAGS_crash.o += -ffreestanding
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ifdef CONFIG_PPC64


@ -170,8 +170,11 @@ core_idle_lock_held:
bne- core_idle_lock_held
blr
-/* Reuse an unused pt_regs slot for IAMR */
+/* Reuse some unused pt_regs slots for AMR/IAMR/UAMOR/UAMOR */
+#define PNV_POWERSAVE_AMR _TRAP
#define PNV_POWERSAVE_IAMR _DAR
+#define PNV_POWERSAVE_UAMOR _DSISR
+#define PNV_POWERSAVE_AMOR RESULT
/*
* Pass requested state in r3:
@ -205,8 +208,16 @@ pnv_powersave_common:
SAVE_NVGPRS(r1)
BEGIN_FTR_SECTION
mfspr r4, SPRN_AMR
mfspr r5, SPRN_IAMR
mfspr r6, SPRN_UAMOR
std r4, PNV_POWERSAVE_AMR(r1)
std r5, PNV_POWERSAVE_IAMR(r1)
std r6, PNV_POWERSAVE_UAMOR(r1)
BEGIN_FTR_SECTION_NESTED(42)
mfspr r7, SPRN_AMOR
std r7, PNV_POWERSAVE_AMOR(r1)
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfcr r5
@ -935,12 +946,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
REST_GPR(2, r1)
BEGIN_FTR_SECTION
-/* IAMR was saved in pnv_powersave_common() */
+/* These regs were saved in pnv_powersave_common() */
ld r4, PNV_POWERSAVE_AMR(r1)
ld r5, PNV_POWERSAVE_IAMR(r1)
ld r6, PNV_POWERSAVE_UAMOR(r1)
mtspr SPRN_AMR, r4
mtspr SPRN_IAMR, r5
mtspr SPRN_UAMOR, r6
BEGIN_FTR_SECTION_NESTED(42)
ld r7, PNV_POWERSAVE_AMOR(r1)
mtspr SPRN_AMOR, r7
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
/*
-* We don't need an isync here because the upcoming mtmsrd is
-* execution synchronizing.
+* We don't need an isync here after restoring IAMR because the upcoming
+* mtmsrd is execution synchronizing.
*/
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)


@ -277,6 +277,9 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs))
return 0;
+if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+return 0;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing


@ -477,8 +477,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
+/* Don't allow userspace to set the trap value */
+regs->trap = 0;
/* These regs are not checkpointed; they can go in 'regs'. */
-err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);


@ -402,7 +402,7 @@ _GLOBAL(set_context)
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
-* Must preserve r7, r8, r9, and r10
+* Must preserve r7, r8, r9, r10 and r11
*/
_GLOBAL(loadcam_entry)
mflr r5
@ -438,6 +438,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
*/
_GLOBAL(loadcam_multi)
mflr r8
+/* Don't switch to AS=1 if already there */
+mfmsr r11
+andi. r11,r11,MSR_IS
+bne 10f
/*
* Set up temporary TLB entry that is the same as what we're
@ -463,6 +467,7 @@ _GLOBAL(loadcam_multi)
mtmsr r6
isync
+10:
mr r9,r3
add r10,r3,r4
2: bl loadcam_entry
@ -471,6 +476,10 @@ _GLOBAL(loadcam_multi)
mr r3,r9
blt 2b
+/* Don't return to AS=0 if we were in AS=1 at function start */
+andi. r11,r11,MSR_IS
+bne 3f
/* Return to AS=0 and clear the temporary entry */
mfmsr r6
rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
@ -486,6 +495,7 @@ _GLOBAL(loadcam_multi)
tlbwe
isync
+3:
mtlr r8
blr
#endif


@ -227,7 +227,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
struct drmem_lmb **end_lmb)
{
struct drmem_lmb *lmb, *start, *end;
-struct drmem_lmb *last_lmb;
+struct drmem_lmb *limit;
start = NULL;
for_each_drmem_lmb(lmb) {
@ -240,10 +240,10 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
if (!start)
return -EINVAL;
-end = &start[n_lmbs - 1];
+end = &start[n_lmbs];
-last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
-if (end > last_lmb)
+limit = &drmem_info->lmbs[drmem_info->n_lmbs];
+if (end > limit)
return -EINVAL;
*start_lmb = start;


@ -1056,7 +1056,7 @@ static int __init vpa_debugfs_init(void)
{
char name[16];
long i;
-static struct dentry *vpa_dir;
+struct dentry *vpa_dir;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;


@ -72,13 +72,6 @@ static u32 xive_ipi_irq;
/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
-/*
- * A "disabled" interrupt should never fire, to catch problems
- * we set its logical number to this
- */
-#define XIVE_BAD_IRQ 0x7fffffff
-#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
/* An invalid CPU target */
#define XIVE_INVALID_TARGET (-1)
@ -1074,7 +1067,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
xc = per_cpu(xive_cpu, cpu);
/* Check if we are already setup */
-if (xc->hw_ipi != 0)
+if (xc->hw_ipi != XIVE_BAD_IRQ)
return 0;
/* Grab an IPI from the backend, this will populate xc->hw_ipi */
@ -1111,7 +1104,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
/* Disable the IPI and free the IRQ data */
/* Already cleaned up ? */
-if (xc->hw_ipi == 0)
+if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
/* Mask the IPI */
@ -1267,6 +1260,7 @@ static int xive_prepare_cpu(unsigned int cpu)
if (np)
xc->chip_id = of_get_ibm_chip_id(np);
of_node_put(np);
+xc->hw_ipi = XIVE_BAD_IRQ;
per_cpu(xive_cpu, cpu) = xc;
}


@ -311,7 +311,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
s64 rc;
/* Free the IPI */
-if (!xc->hw_ipi)
+if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
@ -319,7 +319,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
-xc->hw_ipi = 0;
+xc->hw_ipi = XIVE_BAD_IRQ;
break;
}
}


@ -509,11 +509,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
-if (!xc->hw_ipi)
+if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
xive_irq_bitmap_free(xc->hw_ipi);
-xc->hw_ipi = 0;
+xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */


@ -9,6 +9,13 @@
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H
+/*
+ * A "disabled" interrupt should never fire, to catch problems
+ * we set its logical number to this
+ */
+#define XIVE_BAD_IRQ 0x7fffffff
+#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
/* Each CPU carry one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP


@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for xmon
-# Avoid clang warnings around longjmp/setjmp declarations
-subdir-ccflags-y := -ffreestanding
subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror
GCOV_PROFILE := n


@ -79,7 +79,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
{
-return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
+return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)


@ -1024,6 +1024,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->iprcc = PGM_ADDRESSING;
scb_s->pgmilc = 4;
scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
+rc = 1;
}
return rc;
}


@ -787,14 +787,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
unsigned long gaddr, int level)
{
+const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
unsigned long *table;
if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
return NULL;
if (gmap_is_shadow(gmap) && gmap->removed)
return NULL;
-if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
+if (asce_type != _ASCE_TYPE_REGION1 &&
+gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
return NULL;
table = gmap->table;
switch (gmap->asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:


@ -106,7 +106,7 @@ ENTRY(startup_32)
notl %eax
andl %eax, %ebx
cmpl $LOAD_PHYSICAL_ADDR, %ebx
-jge 1f
+jae 1f
#endif
movl $LOAD_PHYSICAL_ADDR, %ebx
1:


@ -106,7 +106,7 @@ ENTRY(startup_32)
notl %eax
andl %eax, %ebx
cmpl $LOAD_PHYSICAL_ADDR, %ebx
-jge 1f
+jae 1f
#endif
movl $LOAD_PHYSICAL_ADDR, %ebx
1:
@ -297,7 +297,7 @@ ENTRY(startup_64)
notq %rax
andq %rax, %rbp
cmpq $LOAD_PHYSICAL_ADDR, %rbp
-jge 1f
+jae 1f
#endif
movq $LOAD_PHYSICAL_ADDR, %rbp
1:


@ -1489,6 +1489,7 @@ ENTRY(int3)
END(int3)
ENTRY(general_protection)
+ASM_CLAC
pushl $do_general_protection
jmp common_exception
END(general_protection)


@ -1070,7 +1070,7 @@ struct kvm_x86_ops {
bool (*xsaves_supported)(void);
bool (*umip_emulated)(void);
-int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+int (*check_nested_events)(struct kvm_vcpu *vcpu);
void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);


@ -608,12 +608,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return __pmd(val);
}
-/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+/*
+ * mprotect needs to preserve PAT and encryption bits when updating
+ * vm_page_prot
+ */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
-pgprotval_t addbits = pgprot_val(newprot);
+pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
return __pgprot(preservebits | addbits);
}


@ -124,7 +124,7 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
-_PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+_PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*


@ -1752,7 +1752,7 @@ int __acpi_acquire_global_lock(unsigned int *lock)
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = cmpxchg(lock, old, new);
} while (unlikely (val != old));
-return (new < 3) ? -1 : 0;
+return ((new & 0x3) < 3) ? -1 : 0;
}
int __acpi_release_global_lock(unsigned int *lock)


@ -1917,6 +1917,10 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
static struct kvm *svm_vm_alloc(void)
{
struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
+if (!kvm_svm)
+return NULL;
return &kvm_svm->kvm;
}


@ -2156,43 +2156,15 @@ static void vmcs_load(struct vmcs *vmcs)
}
#ifdef CONFIG_KEXEC_CORE
-/*
- * This bitmap is used to indicate whether the vmclear
- * operation is enabled on all cpus. All disabled by
- * default.
- */
-static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
-static inline void crash_enable_local_vmclear(int cpu)
-{
-cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-static inline void crash_disable_local_vmclear(int cpu)
-{
-cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-static inline int crash_local_vmclear_enabled(int cpu)
-{
-return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
static void crash_vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
-if (!crash_local_vmclear_enabled(cpu))
-return;
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
vmcs_clear(v->vmcs);
}
-#else
-static inline void crash_enable_local_vmclear(int cpu) { }
-static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */
static void __loaded_vmcs_clear(void *arg)
@ -2204,19 +2176,24 @@ static void __loaded_vmcs_clear(void *arg)
return; /* vcpu migration can race with cpu offline */
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
-crash_disable_local_vmclear(cpu);
vmcs_clear(loaded_vmcs->vmcs);
if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
vmcs_clear(loaded_vmcs->shadow_vmcs);
list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
/*
-* we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
-* is before setting loaded_vmcs->vcpu to -1 which is done in
-* loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist
-* then adds the vmcs into percpu list before it is deleted.
+* Ensure all writes to loaded_vmcs, including deleting it from its
+* current percpu list, complete before setting loaded_vmcs->vcpu to
+* -1, otherwise a different cpu can see vcpu == -1 first and add
+* loaded_vmcs to its percpu list before it's deleted from this cpu's
+* list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
*/
smp_wmb();
-loaded_vmcs_init(loaded_vmcs);
-crash_enable_local_vmclear(cpu);
+loaded_vmcs->cpu = -1;
+loaded_vmcs->launched = 0;
}
static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
@ -3067,18 +3044,17 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!already_loaded) {
loaded_vmcs_clear(vmx->loaded_vmcs);
local_irq_disable();
-crash_disable_local_vmclear(cpu);
/*
-* Read loaded_vmcs->cpu should be before fetching
-* loaded_vmcs->loaded_vmcss_on_cpu_link.
-* See the comments in __loaded_vmcs_clear().
+* Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
+* this cpu's percpu list, otherwise it may not yet be deleted
+* from its previous cpu's percpu list. Pairs with the
+* smb_wmb() in __loaded_vmcs_clear().
*/
smp_rmb();
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
-crash_enable_local_vmclear(cpu);
local_irq_enable();
}
@ -4422,21 +4398,6 @@ static int hardware_enable(void)
!hv_get_vp_assist_page(cpu))
return -EFAULT;
-INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
-INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
-spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-/*
- * Now we can enable the vmclear operation in kdump
- * since the loaded_vmcss_on_cpu list on this cpu
- * has been initialized.
- *
- * Though the cpu is not in VMX operation now, there
- * is no problem to enable the vmclear operation
- * for the loaded_vmcss_on_cpu list is empty!
- */
-crash_enable_local_vmclear(cpu);
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
test_bits = FEATURE_CONTROL_LOCKED;
@ -6954,8 +6915,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
-return (!to_vmx(vcpu)->nested.nested_run_pending &&
-vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+if (to_vmx(vcpu)->nested.nested_run_pending)
+return false;
+if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+return true;
+return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
@ -11016,6 +10982,10 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
static struct kvm *vmx_vm_alloc(void)
{
struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
+if (!kvm_vmx)
+return NULL;
return &kvm_vmx->kvm;
}
@ -12990,7 +12960,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
}
}
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qual;
@ -13028,8 +12998,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
return 0;
}
-if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
-nested_exit_on_intr(vcpu)) {
+if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
@ -13607,17 +13576,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (likely(!vmx->fail)) {
-/*
- * TODO: SDM says that with acknowledge interrupt on
- * exit, bit 31 of the VM-exit interrupt information
- * (valid interrupt) is always set to 1 on
- * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
- * need kvm_cpu_has_interrupt(). See the commit
- * message for details.
- */
-if (nested_exit_intr_ack_set(vcpu) &&
-exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
-kvm_cpu_has_interrupt(vcpu)) {
+if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+nested_exit_intr_ack_set(vcpu)) {
int irq = kvm_cpu_get_interrupt(vcpu);
WARN_ON(irq < 0);
vmcs12->vm_exit_intr_info = irq |
@ -14590,7 +14550,7 @@ module_exit(vmx_exit);
static int __init vmx_init(void)
{
-int r;
+int r, cpu;
#if IS_ENABLED(CONFIG_HYPERV)
/*
@ -14641,6 +14601,12 @@ static int __init vmx_init(void)
}
}
+for_each_possible_cpu(cpu) {
+INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
+spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+}
#ifdef CONFIG_KEXEC_CORE
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);


@ -7124,7 +7124,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}
-static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+static int inject_pending_event(struct kvm_vcpu *vcpu)
{
int r;
@ -7160,7 +7160,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
* from L2 to L1.
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
-r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+r = kvm_x86_ops->check_nested_events(vcpu);
if (r != 0)
return r;
}
@ -7210,7 +7210,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
* KVM_REQ_EVENT only on certain events and not unconditionally?
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
-r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+r = kvm_x86_ops->check_nested_events(vcpu);
if (r != 0)
return r;
}
@ -7683,7 +7683,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
-if (inject_pending_event(vcpu, req_int_win) != 0)
+if (inject_pending_event(vcpu) != 0)
req_immediate_exit = true;
else {
/* Enable SMI/NMI/IRQ window open exits if needed.
@ -7894,7 +7894,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
-kvm_x86_ops->check_nested_events(vcpu, false);
+kvm_x86_ops->check_nested_events(vcpu);
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);
@ -9229,6 +9229,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
{
int i;
+/*
+ * Clear out the previous array pointers for the KVM_MR_MOVE case. The
+ * old arrays will be freed by __kvm_set_memory_region() if installing
+ * the new memslot is successful.
+ */
+memset(&slot->arch, 0, sizeof(slot->arch));
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
unsigned long ugfn;
@ -9303,6 +9310,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
+if (change == KVM_MR_MOVE)
+return kvm_arch_create_memslot(kvm, memslot,
+mem->memory_size >> PAGE_SHIFT);
return 0;
}


@ -833,7 +833,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
-if (!phys_name || !phys_data)
+if (!phys_name || (data && !phys_data))
status = EFI_INVALID_PARAMETER;
else
status = efi_thunk(set_variable, phys_name, phys_vendor,
@ -864,7 +864,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
-if (!phys_name || !phys_data)
+if (!phys_name || (data && !phys_data))
status = EFI_INVALID_PARAMETER;
else
status = efi_thunk(set_variable, phys_name, phys_vendor,


@ -5156,20 +5156,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
return bfqq;
}
-static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+static void
+bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
-struct bfq_data *bfqd = bfqq->bfqd;
enum bfqq_expiration reason;
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
-bfq_clear_bfqq_wait_request(bfqq);
+/*
+ * Considering that bfqq may be in race, we should firstly check
+ * whether bfqq is in service before doing something on it. If
+ * the bfqq in race is not in service, it has already been expired
+ * through __bfq_bfqq_expire func and its wait_request flags has
+ * been cleared in __bfq_bfqd_reset_in_service func.
+ */
+if (bfqq != bfqd->in_service_queue) {
+spin_unlock_irqrestore(&bfqd->lock, flags);
+return;
+}
+bfq_clear_bfqq_wait_request(bfqq);
if (bfq_bfqq_budget_timeout(bfqq))
/*
* Also here the queue can be safely expired
@ -5214,7 +5222,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
* early.
*/
if (bfqq)
-bfq_idle_slice_timer_body(bfqq);
+bfq_idle_slice_timer_body(bfqd, bfqq);
return HRTIMER_NORESTART;
}


@ -87,6 +87,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
* making it impossible to determine icq_cache. Record it in @icq.
*/
icq->__rcu_icq_cache = et->icq_cache;
+icq->flags |= ICQ_DESTROYED;
call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
@ -230,15 +231,21 @@ static void __ioc_clear_queue(struct list_head *icq_list)
{
unsigned long flags;
+rcu_read_lock();
while (!list_empty(icq_list)) {
struct io_cq *icq = list_entry(icq_list->next,
struct io_cq, q_node);
struct io_context *ioc = icq->ioc;
spin_lock_irqsave(&ioc->lock, flags);
+if (icq->flags & ICQ_DESTROYED) {
+spin_unlock_irqrestore(&ioc->lock, flags);
+continue;
+}
ioc_destroy_icq(icq);
spin_unlock_irqrestore(&ioc->lock, flags);
}
+rcu_read_unlock();
}
/**


@ -717,6 +717,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
+t->backing_dev_info->io_pages =
+t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);


@ -764,6 +764,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
rc = -ENODEV;
+goto fail;
}


@ -4570,22 +4570,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
-rc = scsi_add_host_with_dma(ap->scsi_host,
-&ap->tdev, ap->host->dev);
+rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
if (rc)
-goto err_add;
+goto err_alloc;
}
return 0;
-err_add:
-scsi_host_put(host->ports[i]->scsi_host);
err_alloc:
while (--i >= 0) {
struct Scsi_Host *shost = host->ports[i]->scsi_host;
+/* scsi_host_put() is in ata_devres_release() */
scsi_remove_host(shost);
-scsi_host_put(shost);
}
return rc;
}


@ -572,7 +572,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
}
retval = fw_sysfs_wait_timeout(fw_priv, timeout);
-if (retval < 0) {
+if (retval < 0 && retval != -ENOENT) {
mutex_lock(&fw_lock);
fw_load_abort(fw_sysfs);
mutex_unlock(&fw_lock);


@ -571,6 +571,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
if (tag != -1U) {
cmd = &nq->cmds[tag];
cmd->tag = tag;
+cmd->error = BLK_STS_OK;
cmd->nq = nq;
if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
@ -1433,6 +1434,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
+cmd->error = BLK_STS_OK;
cmd->nq = nq;
blk_mq_start_request(bd->rq);
@ -1480,7 +1482,12 @@ static void cleanup_queues(struct nullb *nullb)
static void null_del_dev(struct nullb *nullb)
{
-struct nullb_device *dev = nullb->dev;
+struct nullb_device *dev;
+if (!nullb)
+return;
+dev = nullb->dev;
ida_simple_remove(&nullb_indexes, nullb->index);
@ -1844,6 +1851,7 @@ out_cleanup_queues:
cleanup_queues(nullb);
out_free_nullb:
kfree(nullb);
+dev->nullb = NULL;
out:
return rv;
}


@ -47,6 +47,7 @@
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/sched/mm.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@ -2188,10 +2189,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
-unsigned int psegs, grants;
+unsigned int psegs, grants, memflags;
int err, i;
struct blkfront_info *info = rinfo->dev_info;
+memflags = memalloc_noio_save();
if (info->max_indirect_segments == 0) {
if (!HAS_EXTRA_REQ)
grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@ -2223,7 +2226,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
-struct page *indirect_page = alloc_page(GFP_NOIO);
+struct page *indirect_page = alloc_page(GFP_KERNEL);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
@ -2234,15 +2237,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].grants_used =
kvcalloc(grants,
sizeof(rinfo->shadow[i].grants_used[0]),
-GFP_NOIO);
+GFP_KERNEL);
rinfo->shadow[i].sg = kvcalloc(psegs,
sizeof(rinfo->shadow[i].sg[0]),
-GFP_NOIO);
+GFP_KERNEL);
if (info->max_indirect_segments)
rinfo->shadow[i].indirect_grants =
kvcalloc(INDIRECT_GREFS(grants),
sizeof(rinfo->shadow[i].indirect_grants[0]),
-GFP_NOIO);
+GFP_KERNEL);
if ((rinfo->shadow[i].grants_used == NULL) ||
(rinfo->shadow[i].sg == NULL) ||
(info->max_indirect_segments &&
@ -2251,6 +2254,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
sg_init_table(rinfo->shadow[i].sg, psegs);
}
+memalloc_noio_restore(memflags);
return 0;
@ -2270,6 +2274,9 @@ out_of_memory:
__free_page(indirect_page);
}
}
+memalloc_noio_restore(memflags);
return -ENOMEM;
}


@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
if (ret)
goto unlock;
-*buf = readl(rsb->regs + RSB_DATA);
+*buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
unlock:
mutex_unlock(&rsb->lock);


@ -3134,8 +3134,8 @@ static void __get_guid(struct ipmi_smi *intf)
if (rv)
/* Send failed, no GUID available. */
bmc->dyn_guid_set = 0;
-wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+else
+wait_event(intf->waitq, bmc->dyn_guid_set != 2);
/* dyn_guid_set makes the guid data available. */
smp_rmb();


@ -104,11 +104,8 @@ static int tpm_read_log(struct tpm_chip *chip)
*
* If an event log is found then the securityfs files are setup to
* export it to userspace, otherwise nothing is done.
-*
-* Returns -ENODEV if the firmware has no event log or securityfs is not
-* supported.
*/
-int tpm_bios_log_setup(struct tpm_chip *chip)
+void tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
unsigned int cnt;
@ -117,7 +114,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
rc = tpm_read_log(chip);
if (rc < 0)
-return rc;
+return;
log_version = rc;
cnt = 0;
@ -163,13 +160,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
}
-return 0;
+return;
err:
-rc = PTR_ERR(chip->bios_dir[cnt]);
chip->bios_dir[cnt] = NULL;
tpm_bios_log_teardown(chip);
-return rc;
+return;
}
void tpm_bios_log_teardown(struct tpm_chip *chip)


@ -129,6 +129,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
u32 converted_event_size;
u32 converted_event_type;
+(*pos)++;
converted_event_size = do_endian_conversion(event->event_size);
v += sizeof(struct tcpa_event) + converted_event_size;
@ -146,7 +147,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
((v + sizeof(struct tcpa_event) + converted_event_size) >= limit))
return NULL;
-(*pos)++;
return v;
}


@ -143,6 +143,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
size_t event_size;
void *marker;
+(*pos)++;
event_header = log->bios_event_log;
if (v == SEQ_START_TOKEN) {
@ -167,7 +168,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
if (((v + event_size) >= limit) || (event_size == 0))
return NULL;
-(*pos)++;
return v;
}


@ -463,9 +463,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_sysfs_add_device(chip);
-rc = tpm_bios_log_setup(chip);
-if (rc != 0 && rc != -ENODEV)
-return rc;
+tpm_bios_log_setup(chip);
tpm_add_ppi(chip);


@ -602,6 +602,6 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
-int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif


@ -436,8 +436,10 @@ static void __init jz4770_cgu_init(struct device_node *np)
cgu = ingenic_cgu_new(jz4770_cgu_clocks,
ARRAY_SIZE(jz4770_cgu_clocks), np);
-if (!cgu)
+if (!cgu) {
pr_err("%s: failed to initialise CGU\n", __func__);
+return;
+}
retval = ingenic_cgu_register_clocks(cgu);
if (retval)


@ -310,6 +310,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
void __iomem *base;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+if (!np)
+np = of_find_compatible_node(NULL, NULL,
+"fsl,imx6ull-ocotp");
if (!np)
return -ENOENT;


@ -1081,6 +1081,12 @@ free_and_return:
static inline void clean_chip_info(void)
{
int i;
+/* flush any pending work items */
+if (chips)
+for (i = 0; i < nr_chips; i++)
+cancel_work_sync(&chips[i].throttle);
kfree(chips);
}


@ -1457,7 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
*/
void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
{
-__be64 sector_size = cpu_to_be64(512);
+/*
+ * Set sector size to a big value, practically disabling
+ * sector size segmentation in xts implementation. We cannot
+ * take full advantage of this HW feature with existing
+ * crypto API / dm-crypt SW architecture.
+ */
+__be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
@ -1509,7 +1515,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
*/
void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
{
-__be64 sector_size = cpu_to_be64(512);
+/*
+ * Set sector size to a big value, practically disabling
+ * sector size segmentation in xts implementation. We cannot
+ * take full advantage of this HW feature with existing
+ * crypto API / dm-crypt SW architecture.
+ */
+__be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);


@ -731,7 +731,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
-areq->assoclen, NS_BIT);
+areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@ -1080,9 +1080,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
/* Hash associated data */
-if (req->assoclen > 0)
+if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@ -1310,7 +1312,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
-unsigned int assoclen = req->assoclen;
+unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@ -1469,7 +1471,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;
/* process assoc data */
-if (req->assoclen > 0) {
+if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@ -1561,7 +1563,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
-if (req->assoclen > 0)
+if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@ -1572,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
-req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@ -1604,7 +1606,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
-req->assoclen -= CCM_BLOCK_IV_SIZE;
+areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@ -1812,7 +1814,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
-if (req->assoclen > 0)
+if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@ -1836,8 +1838,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
__func__, cryptlen, req->assoclen, ctx->authsize);
dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
__func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@ -1853,7 +1855,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
-temp64 = cpu_to_be64(req->assoclen * 8);
+temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@ -1863,8 +1865,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
-temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
-cryptlen) * 8);
+temp64 = cpu_to_be64((req_ctx->assoclen +
+GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@ -1884,7 +1886,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
-req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@ -1909,7 +1911,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
-req->cryptlen, req->assoclen);
+req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@ -2058,8 +2060,11 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@ -2087,8 +2092,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}
+memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@ -2106,8 +2114,11 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@ -2133,8 +2144,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}
+memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@ -2250,8 +2264,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}
+memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@ -2273,11 +2290,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
memset(areq_ctx, 0, sizeof(*areq_ctx));
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@ -2305,8 +2325,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@ -2328,11 +2351,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
memset(areq_ctx, 0, sizeof(*areq_ctx));
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);


@ -67,6 +67,7 @@ struct aead_req_ctx {
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;

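The ccree hunks above follow a single pattern: snapshot req->assoclen into the driver's per-request context at the top of each entry point, then let the rfc4309/rfc4543 paths adjust only that internal copy instead of mutating the caller's request. A minimal sketch of the idea (the struct and function names here are illustrative, not the driver's):

/*
 * Sketch only: cache caller-owned request fields in a per-request
 * context so later IV-stripping adjustments don't corrupt the
 * aead_request seen by the caller.
 */
struct my_req_ctx {
        u32 assoclen;                   /* internal copy of req->assoclen */
};

static int my_aead_encrypt(struct aead_request *req)
{
        struct my_req_ctx *ctx = aead_request_ctx(req);

        ctx->assoclen = req->assoclen;  /* snapshot once at entry */
        /*
         * rfc4309/rfc4543 paths may now do ctx->assoclen -= ivsize,
         * and every later stage reads ctx->assoclen, never
         * req->assoclen.
         */
        return 0;
}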

@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 skip = req->assoclen + req->cryptlen;
u32 skip = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
@ -460,10 +460,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
/* Map the src SGL */
rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (rc) {
rc = -ENOMEM;
if (rc)
goto cipher_exit;
}
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@ -477,12 +475,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
} else {
/* Map the dst sg */
if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
&req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents)) {
rc = -ENOMEM;
rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
&req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents);
if (rc)
goto cipher_exit;
}
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@ -577,8 +574,8 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
req->assoclen, req->cryptlen);
size_to_unmap = req->assoclen + req->cryptlen;
areq_ctx->assoclen, req->cryptlen);
size_to_unmap = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_unmap += areq_ctx->req_authsize;
if (areq_ctx->is_gcm4543)
@ -720,7 +717,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
struct scatterlist *current_sg = req->src;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int sg_index = 0;
u32 size_of_assoc = req->assoclen;
u32 size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@ -731,7 +728,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
if (req->assoclen == 0) {
if (areq_ctx->assoclen == 0) {
areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@ -791,7 +788,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
req->assoclen, 0, is_last,
areq_ctx->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
@ -975,11 +972,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
unsigned int size_for_map = req->assoclen + req->cryptlen;
unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool is_gcm4543 = areq_ctx->is_gcm4543;
u32 size_to_skip = req->assoclen;
u32 size_to_skip = areq_ctx->assoclen;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@ -1023,9 +1020,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
size_for_map = req->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
size_for_map = areq_ctx->assoclen + req->cryptlen;
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_for_map += authsize;
else
size_for_map -= authsize;
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
@ -1033,10 +1034,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
&areq_ctx->dst.nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
if (rc) {
rc = -ENOMEM;
if (rc)
goto chain_data_exit;
}
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@ -1190,11 +1189,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
}
areq_ctx->ccm_iv0_dma_addr = dma_addr;
if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
&sg_data, req->assoclen)) {
rc = -ENOMEM;
rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
&sg_data, areq_ctx->assoclen);
if (rc)
goto aead_map_failure;
}
}
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@ -1243,10 +1241,12 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
size_to_map = req->cryptlen + req->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_map = req->cryptlen + areq_ctx->assoclen;
/* If we do in-place encryption, we also need the auth tag */
if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
(req->src == req->dst)) {
size_to_map += authsize;
}
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
@ -1254,10 +1254,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
if (rc) {
rc = -ENOMEM;
if (rc)
goto aead_map_failure;
}
if (areq_ctx->is_single_pass) {
/*
@ -1341,6 +1339,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@ -1360,18 +1359,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/*TODO: copy data in case that buffer is enough for operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
&sg_data)) {
return -ENOMEM;
}
rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
&sg_data);
if (rc)
return rc;
}
if (src && nbytes > 0 && do_update) {
if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
&areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents)) {
rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
&areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents);
if (rc)
goto unmap_curr_buff;
}
if (src && mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
@ -1390,7 +1389,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
if (rc)
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
@ -1406,7 +1406,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
return rc;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@ -1425,6 +1425,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@ -1469,21 +1470,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
}
if (*curr_buff_cnt) {
if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
&sg_data)) {
return -ENOMEM;
}
rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
&sg_data);
if (rc)
return rc;
/* change the buffer index for next operation */
swap_index = 1;
}
if (update_data_len > *curr_buff_cnt) {
if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
DMA_TO_DEVICE, &areq_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
&mapped_nents)) {
rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
DMA_TO_DEVICE, &areq_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
&mapped_nents);
if (rc)
goto unmap_curr_buff;
}
if (mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
@ -1503,7 +1504,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
if (rc)
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@ -1517,7 +1519,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
return rc;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,

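Several buffer-manager hunks above also convert "if (helper(...)) { rc = -ENOMEM; goto ...; }" into "rc = helper(...); if (rc) goto ...;" so the helper's real error code is propagated instead of being overwritten. A minimal sketch of that error-propagation shape (do_map, do_setup and do_unmap are hypothetical helpers):

static int map_request(struct device *dev, struct scatterlist *sg)
{
        int rc;

        rc = do_map(dev, sg);           /* hypothetical helper */
        if (rc)
                goto out;               /* keep -EINVAL, -ENOMEM, ... */

        rc = do_setup(dev);             /* hypothetical helper */
        if (rc)
                goto unmap;

        return 0;

unmap:
        do_unmap(dev, sg);
out:
        return rc;                      /* propagate the real cause */
}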

@ -25,6 +25,7 @@
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
@ -621,49 +622,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *out_buf = sdcp->coh->sha_out_buf;
uint8_t *src_buf;
struct scatterlist *src;
unsigned int i, len, clen;
unsigned int i, len, clen, oft = 0;
int ret;
int fin = rctx->fini;
if (fin)
rctx->fini = 0;
for_each_sg(req->src, src, nents, i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
src = req->src;
len = req->nbytes;
do {
if (actx->fill + len > DCP_BUF_SZ)
clen = DCP_BUF_SZ - actx->fill;
else
clen = len;
while (len) {
if (actx->fill + len > DCP_BUF_SZ)
clen = DCP_BUF_SZ - actx->fill;
else
clen = len;
memcpy(in_buf + actx->fill, src_buf, clen);
len -= clen;
src_buf += clen;
actx->fill += clen;
scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
0);
/*
* If we filled the buffer and still have some
* more data, submit the buffer.
*/
if (len && actx->fill == DCP_BUF_SZ) {
ret = mxs_dcp_run_sha(req);
if (ret)
return ret;
actx->fill = 0;
rctx->init = 0;
}
} while (len);
len -= clen;
oft += clen;
actx->fill += clen;
/*
* If we filled the buffer and still have some
* more data, submit the buffer.
*/
if (len && actx->fill == DCP_BUF_SZ) {
ret = mxs_dcp_run_sha(req);
if (ret)
return ret;
actx->fill = 0;
rctx->init = 0;
}
}
if (fin) {

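The mxs-dcp hunk above replaces sg_virt()-based walking, which assumes every scatterlist entry is virtually mapped and its dma_len valid, with scatterwalk_map_and_copy(), which copies correctly across page boundaries and highmem. A rough sketch of draining a scatterlist through a fixed bounce buffer the same way (drain_sg and the submit step are illustrative):

#include <crypto/scatterwalk.h>

static int drain_sg(struct scatterlist *sg, unsigned int nbytes,
                    u8 *buf, unsigned int bufsz)
{
        unsigned int oft = 0, clen;

        while (nbytes) {
                clen = min(nbytes, bufsz);
                /* safe for highmem and page-crossing entries */
                scatterwalk_map_and_copy(buf, sg, oft, clen, 0);
                oft += clen;
                nbytes -= clen;
                /* ... submit buf to the engine here ... */
        }
        return 0;
}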

@ -489,11 +489,6 @@ static int _sdei_event_unregister(struct sdei_event *event)
{
lockdep_assert_held(&sdei_events_lock);
spin_lock(&sdei_list_lock);
event->reregister = false;
event->reenable = false;
spin_unlock(&sdei_list_lock);
if (event->type == SDEI_EVENT_TYPE_SHARED)
return sdei_api_event_unregister(event->event_num);
@ -516,6 +511,11 @@ int sdei_event_unregister(u32 event_num)
break;
}
spin_lock(&sdei_list_lock);
event->reregister = false;
event->reenable = false;
spin_unlock(&sdei_list_lock);
err = _sdei_event_unregister(event);
if (err)
break;
@ -583,26 +583,15 @@ static int _sdei_event_register(struct sdei_event *event)
lockdep_assert_held(&sdei_events_lock);
spin_lock(&sdei_list_lock);
event->reregister = true;
spin_unlock(&sdei_list_lock);
if (event->type == SDEI_EVENT_TYPE_SHARED)
return sdei_api_event_register(event->event_num,
sdei_entry_point,
event->registered,
SDEI_EVENT_REGISTER_RM_ANY, 0);
err = sdei_do_cross_call(_local_event_register, event);
if (err) {
spin_lock(&sdei_list_lock);
event->reregister = false;
event->reenable = false;
spin_unlock(&sdei_list_lock);
if (err)
sdei_do_cross_call(_local_event_unregister, event);
}
return err;
}
@ -630,8 +619,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
break;
}
spin_lock(&sdei_list_lock);
event->reregister = true;
spin_unlock(&sdei_list_lock);
err = _sdei_event_register(event);
if (err) {
spin_lock(&sdei_list_lock);
event->reregister = false;
event->reenable = false;
spin_unlock(&sdei_list_lock);
sdei_event_destroy(event);
pr_warn("Failed to register event %u: %d\n", event_num,
err);


@ -572,7 +572,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
}
}
if (efi_enabled(EFI_MEMMAP))
if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
efi_memattr_init();
efi_tpm_eventlog_init();

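The EFI change gates efi_memattr_init() with !IS_ENABLED(CONFIG_X86_32) rather than an #ifdef. IS_ENABLED() folds to a compile-time 0 or 1, so the dead branch is discarded by the compiler but still parsed and type-checked, for example (CONFIG_MY_FEATURE and setup_my_feature() are illustrative):

        if (IS_ENABLED(CONFIG_MY_FEATURE))      /* 0 or 1 at compile time */
                setup_my_feature();             /* compiled out when =n */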

@ -2162,9 +2162,9 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@ -2223,25 +2223,18 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mutex_lock(&mgr->payload_lock);
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
memset(mgr->payloads, 0,
mgr->max_payloads * sizeof(mgr->payloads[0]));
memset(mgr->proposed_vcpis, 0,
mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
for (i = 0; i < mgr->max_payloads; i++) {
struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
if (vcpi) {
vcpi->vcpi = 0;
vcpi->num_slots = 0;
}
mgr->proposed_vcpis[i] = NULL;
}
mgr->vcpi_mask = 0;
mutex_unlock(&mgr->payload_lock);
}
out_unlock:
mutex_unlock(&mgr->lock);
mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_put_mst_branch_device(mstb);
return ret;

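The DP-MST fix above makes mgr->payload_lock nest consistently outside mgr->lock and holds it across the whole teardown, so the payload table can never be observed half-cleared and the lock order matches the rest of the driver. The general rule, sketched with illustrative mutexes: pick one nesting order and use it everywhere.

        mutex_lock(&payload_lock);      /* A: always taken first */
        mutex_lock(&state_lock);        /* B: always nested inside A */
        /* ... flip state and clear the payload table together ... */
        mutex_unlock(&state_lock);
        mutex_unlock(&payload_lock);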

@ -46,8 +46,6 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
unsigned long addr;
size_t sz;
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@ -61,22 +59,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
memset(dmah->vaddr, 0, size);
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page((void *)addr));
}
return dmah;
}
@ -89,19 +78,9 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
unsigned long addr;
size_t sz;
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page((void *)addr));
}
if (dmah->vaddr)
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
}
}
/**


@ -4,6 +4,7 @@
* Copyright (C) 2017 Zodiac Inflight Innovations
*/
#include "common.xml.h"
#include "etnaviv_gpu.h"
#include "etnaviv_perfmon.h"
#include "state_hi.xml.h"
@ -31,17 +32,11 @@ struct etnaviv_pm_domain {
};
struct etnaviv_pm_domain_meta {
unsigned int feature;
const struct etnaviv_pm_domain *domains;
u32 nr_domains;
};
static u32 simple_reg_read(struct etnaviv_gpu *gpu,
const struct etnaviv_pm_domain *domain,
const struct etnaviv_pm_signal *signal)
{
return gpu_read(gpu, signal->data);
}
static u32 perf_reg_read(struct etnaviv_gpu *gpu,
const struct etnaviv_pm_domain *domain,
const struct etnaviv_pm_signal *signal)
@ -75,6 +70,34 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
return value;
}
static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu,
const struct etnaviv_pm_domain *domain,
const struct etnaviv_pm_signal *signal)
{
u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
if (gpu->identity.model == chipModel_GC880 ||
gpu->identity.model == chipModel_GC2000 ||
gpu->identity.model == chipModel_GC2100)
reg = VIVS_MC_PROFILE_CYCLE_COUNTER;
return gpu_read(gpu, reg);
}
static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu,
const struct etnaviv_pm_domain *domain,
const struct etnaviv_pm_signal *signal)
{
u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES;
if (gpu->identity.model == chipModel_GC880 ||
gpu->identity.model == chipModel_GC2000 ||
gpu->identity.model == chipModel_GC2100)
reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
return gpu_read(gpu, reg);
}
static const struct etnaviv_pm_domain doms_3d[] = {
{
.name = "HI",
@ -84,13 +107,13 @@ static const struct etnaviv_pm_domain doms_3d[] = {
.signal = (const struct etnaviv_pm_signal[]) {
{
"TOTAL_CYCLES",
VIVS_HI_PROFILE_TOTAL_CYCLES,
&simple_reg_read
0,
&hi_total_cycle_read
},
{
"IDLE_CYCLES",
VIVS_HI_PROFILE_IDLE_CYCLES,
&simple_reg_read
0,
&hi_total_idle_cycle_read
},
{
"AXI_CYCLES_READ_REQUEST_STALLED",
@ -388,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = {
static const struct etnaviv_pm_domain_meta doms_meta[] = {
{
.feature = chipFeatures_PIPE_3D,
.nr_domains = ARRAY_SIZE(doms_3d),
.domains = &doms_3d[0]
},
{
.feature = chipFeatures_PIPE_2D,
.nr_domains = ARRAY_SIZE(doms_2d),
.domains = &doms_2d[0]
},
{
.feature = chipFeatures_PIPE_VG,
.nr_domains = ARRAY_SIZE(doms_vg),
.domains = &doms_vg[0]
}
};
static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
{
unsigned int num = 0, i;
for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
if (gpu->identity.features & meta->feature)
num += meta->nr_domains;
}
return num;
}
static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
unsigned int index)
{
const struct etnaviv_pm_domain *domain = NULL;
unsigned int offset = 0, i;
for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
if (!(gpu->identity.features & meta->feature))
continue;
if (meta->nr_domains < (index - offset)) {
offset += meta->nr_domains;
continue;
}
domain = meta->domains + (index - offset);
}
return domain;
}
int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_domain *domain)
{
const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
if (domain->iter >= meta->nr_domains)
if (domain->iter >= nr_domains)
return -EINVAL;
dom = meta->domains + domain->iter;
dom = pm_domain(gpu, domain->iter);
if (!dom)
return -EINVAL;
domain->id = domain->iter;
domain->nr_signals = dom->nr_signals;
strncpy(domain->name, dom->name, sizeof(domain->name));
domain->iter++;
if (domain->iter == meta->nr_domains)
if (domain->iter == nr_domains)
domain->iter = 0xff;
return 0;
@ -426,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_signal *signal)
{
const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;
if (signal->domain >= meta->nr_domains)
if (signal->domain >= nr_domains)
return -EINVAL;
dom = meta->domains + signal->domain;
dom = pm_domain(gpu, signal->domain);
if (!dom)
return -EINVAL;
if (signal->iter >= dom->nr_signals)
return -EINVAL;


@ -437,6 +437,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
/**
* st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
* @i2c_dev: Controller's private data
* @max: Maximum amount of data to fill into the Tx FIFO
*
* This function fills the Tx FIFO with a fixed pattern when
* in read mode to trigger the clock.


@ -1070,12 +1070,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
MLX5_FLEX_PROTO_CW_MPLS_GRE)
if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
MLX5_FLEX_PROTO_CW_MPLS_UDP)
if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}


@ -534,6 +534,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
},
{
/*
* Acer Aspire 5738z
* Touchpad stops working in mux mode when dis- + re-enabled
* with the touchpad enable/disable toggle hotkey
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
},
},
{ }
};

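The i8042 entry above shows the usual shape of a DMI quirk: one dmi_system_id element whose .matches strings must all match the firmware-reported identity, plus a terminating empty entry. A hedged sketch (the vendor and product strings are placeholders, not a real machine):

static const struct dmi_system_id example_nomux_table[] __initconst = {
        {
                /* Example Vendor Model-X: describe the symptom here */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Model-X"),
                },
        },
        { }     /* terminating empty entry */
};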

@ -2858,12 +2858,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
return 0;
}
static int its_vpe_retrigger(struct irq_data *d)
{
return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
}
static struct irq_chip its_vpe_irq_chip = {
.name = "GICv4-vpe",
.irq_mask = its_vpe_mask_irq,
.irq_unmask = its_vpe_unmask_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_vpe_set_affinity,
.irq_retrigger = its_vpe_retrigger,
.irq_set_irqchip_state = its_vpe_set_irqchip_state,
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};


@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
u32 status = readl(f->base + IRQ_STATUS);
u32 status;
chained_irq_enter(chip, desc);
status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(desc);
return;
goto out;
}
do {
@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
out:
chained_irq_exit(chip, desc);
}
/*
@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/*
* On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need

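The versatile-fpga fix brackets the demux loop with chained_irq_enter()/chained_irq_exit() so the parent interrupt is acked and eoi'd even when the handler bails out early. A self-contained sketch of a chained demux handler (the demo_* names and register offset are illustrative):

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

#define DEMO_IRQ_STATUS 0x00            /* illustrative register offset */

struct demo_irq_data {
        void __iomem *base;
        struct irq_domain *domain;
};

static void demo_irq_handle(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct demo_irq_data *d = irq_desc_get_handler_data(desc);
        u32 status;

        chained_irq_enter(chip, desc);  /* mask/ack the parent IRQ */

        status = readl(d->base + DEMO_IRQ_STATUS);
        while (status) {
                int irq = ffs(status) - 1;

                status &= ~BIT(irq);
                generic_handle_irq(irq_find_mapping(d->domain, irq));
        }

        chained_irq_exit(chip, desc);   /* unmask/eoi the parent IRQ */
}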

@ -552,6 +552,7 @@ void verity_fec_dtr(struct dm_verity *v)
mempool_exit(&f->rs_pool);
mempool_exit(&f->prealloc_pool);
mempool_exit(&f->extra_pool);
mempool_exit(&f->output_pool);
kmem_cache_destroy(f->cache);
if (f->data_bufio)


@ -878,6 +878,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
struct wc_entry *e = &wc->entries[b];
e->index = b;
e->write_in_progress = false;
cond_resched();
}
return 0;
@ -932,6 +933,7 @@ static void writecache_resume(struct dm_target *ti)
e->original_sector = le64_to_cpu(wme.original_sector);
e->seq_count = le64_to_cpu(wme.seq_count);
}
cond_resched();
}
#endif
for (b = 0; b < wc->n_blocks; b++) {
@ -1764,8 +1766,10 @@ static int init_memory(struct dm_writecache *wc)
pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
for (b = 0; b < wc->n_blocks; b++)
for (b = 0; b < wc->n_blocks; b++) {
write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
cond_resched();
}
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc, false);

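The dm-writecache hunks insert cond_resched() into per-block loops that can iterate millions of times on a large cache device; without it, a non-preemptible kernel can trip soft-lockup warnings. The shape, sketched with an illustrative init_entry():

        for (b = 0; b < n_blocks; b++) {
                init_entry(&entries[b]);        /* per-block work */
                cond_resched();                 /* yield periodically */
        }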

@ -1105,7 +1105,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
set_bit(DMZ_RND, &zone->flags);
zmd->nr_rnd_zones++;
} else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
set_bit(DMZ_SEQ, &zone->flags);


@ -5874,7 +5874,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
md_bitmap_wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce) {
if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}


@ -974,16 +974,9 @@ unlock_and_return:
return ret;
}
/* Calculate the delay in us by clock rate and clock cycles */
static inline u32 ov5695_cal_delay(u32 cycles)
{
return DIV_ROUND_UP(cycles, OV5695_XVCLK_FREQ / 1000 / 1000);
}
static int __ov5695_power_on(struct ov5695 *ov5695)
{
int ret;
u32 delay_us;
int i, ret;
struct device *dev = &ov5695->client->dev;
ret = clk_prepare_enable(ov5695->xvclk);
@ -994,21 +987,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695)
gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies);
if (ret < 0) {
dev_err(dev, "Failed to enable regulators\n");
goto disable_clk;
/*
* The hardware requires the regulators to be powered on in order,
* so enable them one by one.
*/
for (i = 0; i < OV5695_NUM_SUPPLIES; i++) {
ret = regulator_enable(ov5695->supplies[i].consumer);
if (ret) {
dev_err(dev, "Failed to enable %s: %d\n",
ov5695->supplies[i].supply, ret);
goto disable_reg_clk;
}
}
gpiod_set_value_cansleep(ov5695->reset_gpio, 0);
/* 8192 cycles prior to first SCCB transaction */
delay_us = ov5695_cal_delay(8192);
usleep_range(delay_us, delay_us * 2);
usleep_range(1000, 1200);
return 0;
disable_clk:
disable_reg_clk:
for (--i; i >= 0; i--)
regulator_disable(ov5695->supplies[i].consumer);
clk_disable_unprepare(ov5695->xvclk);
return ret;
@ -1016,9 +1016,22 @@ disable_clk:
static void __ov5695_power_off(struct ov5695 *ov5695)
{
struct device *dev = &ov5695->client->dev;
int i, ret;
clk_disable_unprepare(ov5695->xvclk);
gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies);
/*
* The hardware requires the regulators to be powered off in order,
* so disable them one by one.
*/
for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) {
ret = regulator_disable(ov5695->supplies[i].consumer);
if (ret)
dev_err(dev, "Failed to disable %s: %d\n",
ov5695->supplies[i].supply, ret);
}
}
static int __maybe_unused ov5695_runtime_resume(struct device *dev)
@ -1288,7 +1301,7 @@ static int ov5695_probe(struct i2c_client *client,
if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ov5695->reset_gpio)) {
dev_err(dev, "Failed to get reset-gpios\n");
return -EINVAL;

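The ov5695 change enables the supplies strictly one by one because the sensor requires a fixed power-up order, and it unwinds the already-enabled ones on failure with the classic "for (--i; i >= 0; i--)" idiom. A compact sketch of the same enable-in-order, unwind-in-reverse pattern:

#include <linux/regulator/consumer.h>

static int power_on(struct regulator_bulk_data *supplies, int n)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = regulator_enable(supplies[i].consumer);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        for (--i; i >= 0; i--)          /* disable in reverse order */
                regulator_disable(supplies[i].consumer);
        return ret;
}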

@ -105,7 +105,7 @@ static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
return (ret == 2) ? 0 : -EIO;
}
#if IS_ENABLED(CONFIG_HWMON)
#if IS_REACHABLE(CONFIG_HWMON)
static const u32 amg88xx_temp_config[] = {
HWMON_T_INPUT,

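The video-i2c one-liner swaps IS_ENABLED() for IS_REACHABLE(): with CONFIG_HWMON=m and this driver built in, IS_ENABLED(CONFIG_HWMON) is still true but the hwmon symbols cannot be linked from built-in code, so the build breaks. IS_REACHABLE() is true only when the calls will actually resolve:

#if IS_REACHABLE(CONFIG_HWMON)          /* built-in, or both modular */
        /* safe to reference hwmon symbols here */
#endif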

@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data)
if (IS_V1(core)) {
core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
}
}


@ -541,16 +541,16 @@ static void enable_irqs(struct cal_ctx *ctx)
static void disable_irqs(struct cal_ctx *ctx)
{
u32 val;
/* Disable IRQ_WDMA_END 0/1 */
reg_write_field(ctx->dev,
CAL_HL_IRQENABLE_CLR(2),
CAL_HL_IRQ_CLEAR,
CAL_HL_IRQ_MASK(ctx->csi2_port));
val = 0;
set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
/* Disable IRQ_WDMA_START 0/1 */
reg_write_field(ctx->dev,
CAL_HL_IRQENABLE_CLR(3),
CAL_HL_IRQ_CLEAR,
CAL_HL_IRQ_MASK(ctx->csi2_port));
val = 0;
set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
}


@ -93,6 +93,11 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
enum dln2_endpoint {
DLN2_EP_OUT = 0,
DLN2_EP_IN = 1,
};
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@ -736,10 +741,10 @@ static int dln2_probe(struct usb_interface *interface,
hostif->desc.bNumEndpoints < 2)
return -ENODEV;
epin = &hostif->endpoint[0].desc;
epout = &hostif->endpoint[1].desc;
epout = &hostif->endpoint[DLN2_EP_OUT].desc;
if (!usb_endpoint_is_bulk_out(epout))
return -ENODEV;
epin = &hostif->endpoint[DLN2_EP_IN].desc;
if (!usb_endpoint_is_bulk_in(epin))
return -ENODEV;


@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
*/
ec->factor = 0;
ec->shift = 0;
if ((ec->nonupdate_dwell == 0)) {
if (!ec->nonupdate_dwell) {
int p, logp, shift;
/* Determine:


@ -629,18 +629,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooblen = 2,
.ooblen = sizeof(marker),
.ooboffs = 0,
.oobbuf.in = spinand->oobbuf,
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
memset(spinand->oobbuf, 0, 2);
spinand_select_target(spinand, pos->target);
spinand_read_page(spinand, &req, false);
if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
return false;
@ -664,15 +664,15 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooboffs = 0,
.ooblen = 2,
.oobbuf.out = spinand->oobbuf,
.ooblen = sizeof(marker),
.oobbuf.out = marker,
};
int ret;
/* Erase block before marking it bad. */
ret = spinand_select_target(spinand, pos->target);
if (ret)
return ret;
@ -681,9 +681,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
spinand_erase_op(spinand, pos);
memset(spinand->oobbuf, 0, 2);
return spinand_write_page(spinand, &req);
}


@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
c.u.ts.sign = (delta < 0) ? 1 : 0;
if (delta < 0)
delta = -delta;
c.u.ts.tm = cpu_to_be64(delta);
err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);

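The cxgb4 fix passes the sign separately because the firmware field holds an unsigned 64-bit magnitude, so a negative adjtime delta must be split into sign plus absolute value before the cpu_to_be64() conversion. Sketched (encode_delta is illustrative):

static void encode_delta(s64 delta, u8 *sign, __be64 *mag)
{
        *sign = delta < 0;
        if (delta < 0)
                delta = -delta;
        *mag = cpu_to_be64(delta);
}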

@ -398,7 +398,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
spin_unlock_bh(&cmdq->cmdq_lock);
if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
if (!wait_for_completion_timeout(&done,
msecs_to_jiffies(CMDQ_TIMEOUT))) {
spin_lock_bh(&cmdq->cmdq_lock);
if (cmdq->errcode[curr_prod_idx] == &errcode)


@ -370,50 +370,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
return -EFAULT;
}
static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
{
struct hinic_cmd_io_status cmd_io_status;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
unsigned long end;
u16 out_size;
int err;
if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
dev_err(&pdev->dev, "Unsupported PCI Function type\n");
return -EINVAL;
}
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
do {
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
HINIC_COMM_CMD_IO_STATUS_GET,
&cmd_io_status, sizeof(cmd_io_status),
&cmd_io_status, &out_size,
HINIC_MGMT_MSG_SYNC);
if ((err) || (out_size != sizeof(cmd_io_status))) {
dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
err);
return err;
}
if (cmd_io_status.status == IO_STOPPED) {
dev_info(&pdev->dev, "IO stopped\n");
return 0;
}
msleep(20);
} while (time_before(jiffies, end));
dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
return -ETIMEDOUT;
}
/**
* clear_io_resource - set the IO resources as not active in the NIC
* @hwdev: the NIC HW device
@ -433,11 +389,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
return -EINVAL;
}
err = wait_for_io_stopped(hwdev);
if (err) {
dev_err(&pdev->dev, "IO has not stopped yet\n");
return err;
}
/* sleep 100 ms to wait for the firmware to stop I/O */
msleep(100);
cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);


@ -52,7 +52,7 @@
#define MSG_NOT_RESP 0xFFFF
#define MGMT_MSG_TIMEOUT 1000
#define MGMT_MSG_TIMEOUT 5000
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@ -276,7 +276,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
if (!wait_for_completion_timeout(recv_done,
msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;

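Both hinic hunks fix the same bug class: wait_for_completion_timeout() takes its timeout in jiffies, so passing a raw millisecond constant made the effective wait depend on HZ. The constant must go through msecs_to_jiffies() (DEMO_TIMEOUT_MS is illustrative):

#define DEMO_TIMEOUT_MS 5000

        /* returns 0 on timeout, remaining jiffies otherwise */
        if (!wait_for_completion_timeout(&done,
                                         msecs_to_jiffies(DEMO_TIMEOUT_MS)))
                return -ETIMEDOUT;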

@ -2065,7 +2065,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
(level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
if ((mask & VXGE_DEBUG_MASK) == mask) \
printk(fmt "\n", __VA_ARGS__); \
printk(fmt "\n", ##__VA_ARGS__); \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)


@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
#define vxge_debug_ll_config(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_ll_config(level, fmt, ...)
#endif
#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
#define vxge_debug_init(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_init(level, fmt, ...)
#endif
#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
#define vxge_debug_tx(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_tx(level, fmt, ...)
#endif
#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
#define vxge_debug_rx(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_rx(level, fmt, ...)
#endif
#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
#define vxge_debug_mem(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_mem(level, fmt, ...)
#endif
#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
#define vxge_debug_entryexit(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_entryexit(level, fmt, ...)
#endif
#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
#define vxge_debug_intr(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_intr(level, fmt, ...)
#endif

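The vxge macros switch from __VA_ARGS__ to the GNU ##__VA_ARGS__ extension: when a macro is invoked with no variadic arguments, plain __VA_ARGS__ leaves a trailing comma behind and the expansion fails to compile. A minimal illustrative wrapper:

/*
 * pr_dbg("hi") with plain __VA_ARGS__ expands to
 * printk("hi" "\n", ) -- a syntax error. The ## extension
 * deletes the trailing comma when the argument list is empty.
 */
#define pr_dbg(fmt, ...) printk(fmt "\n", ##__VA_ARGS__)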

@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d
ahw->reset.seq_error = 0;
ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
if (p_dev->ahw->reset.buff == NULL)
if (ahw->reset.buff == NULL)
return -ENOMEM;
p_buff = p_dev->ahw->reset.buff;


@ -288,7 +288,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u16 mux_id;
@ -303,19 +302,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
if (rmnet_get_endpoint(port, mux_id)) {
NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
return -EINVAL;
if (mux_id != priv->mux_id) {
struct rmnet_endpoint *ep;
ep = rmnet_get_endpoint(port, priv->mux_id);
if (!ep)
return -ENODEV;
if (rmnet_get_endpoint(port, mux_id)) {
NL_SET_ERR_MSG_MOD(extack,
"MUX ID already exists");
return -EINVAL;
}
hlist_del_init_rcu(&ep->hlnode);
hlist_add_head_rcu(&ep->hlnode,
&port->muxed_ep[mux_id]);
ep->mux_id = mux_id;
priv->mux_id = mux_id;
}
ep = rmnet_get_endpoint(port, priv->mux_id);
if (!ep)
return -ENODEV;
hlist_del_init_rcu(&ep->hlnode);
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
ep->mux_id = mux_id;
priv->mux_id = mux_id;
}
if (data[IFLA_RMNET_FLAGS]) {


@ -1457,6 +1457,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
}
if (changed & IEEE80211_CONF_CHANGE_POWER)
ath9k_set_txpower(sc, NULL);
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);


@ -2200,6 +2200,17 @@ static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
lockdep_assert_held(&nvme_subsystems_lock);
/*
* Fail matches for discovery subsystems. This results
* in each discovery controller bound to a unique subsystem.
* This avoids issues with validating controller values
* that can only be true when there is a single unique subsystem.
* There may be multiple and completely independent entities
* that provide discovery controllers.
*/
if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
return NULL;
list_for_each_entry(subsys, &nvme_subsystems, entry) {
if (strcmp(subsys->subnqn, subsysnqn))
continue;


@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
!template->max_dif_sgl_segments || !template->dma_boundary ||
!template->module) {
!template->max_dif_sgl_segments || !template->dma_boundary) {
ret = -EINVAL;
goto out_reghost_failed;
}
@ -1987,7 +1986,6 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
if (ctrl->ctrl.tagset) {
@ -2013,7 +2011,6 @@ nvme_fc_ctrl_free(struct kref *ref)
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
module_put(lport->ops->module);
}
static void
@ -3055,15 +3052,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
if (!try_module_get(lport->ops->module)) {
ret = -EUNATCH;
goto out_free_ctrl;
}
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_mod_put;
goto out_free_ctrl;
}
ctrl->ctrl.opts = opts;
@ -3205,8 +3197,6 @@ out_free_queues:
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_mod_put:
module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:


@ -825,7 +825,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
.module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,


@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
mutex_init(&mem->lock);
epc->mem = mem;
@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
int pageno;
void __iomem *virt_addr;
void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
return NULL;
goto ret;
*phys_addr = mem->phys_base + (pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
ret:
mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);

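The pci-epc-mem change wraps bitmap_find_free_region()/bitmap_release_region() in a new mem->lock mutex, since concurrent endpoint-function users could otherwise corrupt the allocation bitmap; note also the single-unlock "goto ret" shape in the alloc path. A stripped-down sketch of such a serialized bitmap allocator (struct demo_mem is illustrative):

struct demo_mem {
        struct mutex lock;
        unsigned long *bitmap;
        unsigned int pages;
};

static int alloc_region(struct demo_mem *mem, int order)
{
        int pageno;

        mutex_lock(&mem->lock);
        pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
        mutex_unlock(&mem->lock);

        return pageno;          /* bit index, or negative errno */
}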

@ -627,17 +627,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
ret = pciehp_isr(irq, dev_id);
enable_irq(irq);
if (ret != IRQ_WAKE_THREAD) {
pci_config_pm_runtime_put(pdev);
return ret;
}
if (ret != IRQ_WAKE_THREAD)
goto out;
}
synchronize_hardirq(irq);
events = atomic_xchg(&ctrl->pending_events, 0);
if (!events) {
pci_config_pm_runtime_put(pdev);
return IRQ_NONE;
ret = IRQ_NONE;
goto out;
}
/* Check Attention Button Pressed */
@ -666,10 +664,12 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
pciehp_handle_presence_or_link_change(slot, events);
up_read(&ctrl->reset_lock);
ret = IRQ_HANDLED;
out:
pci_config_pm_runtime_put(pdev);
ctrl->ist_running = false;
wake_up(&ctrl->requester);
return IRQ_HANDLED;
return ret;
}
static int pciehp_poll(void *data)


@ -747,9 +747,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
/* Enable what we need to enable */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
PCI_L1SS_CAP_L1_PM_SS, val);
PCI_L1SS_CTL1_L1SS_MASK, val);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
PCI_L1SS_CAP_L1_PM_SS, val);
PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)

Some files were not shown because too many files have changed in this diff Show More