This is the 4.19.129 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl7wXf4ACgkQONu9yGCS
 aT6rDBAAg6jIYJhhb9lpK59hpMxNsaFnPfXdA3Z6qARqH7iIQa9TTP9JF5eFndS0
 +2wV8t/8Nz/3BWq9NQAF525QJdqyY6Ahcj5QQXzIzEZyb/p5fRVCBOUcBP7uaBCu
 gdORR7OhHI9+7aGLr05Svb7pVWPLi0Mk5vjvthEIkojEOIREGuGlERRZNlL1SN3y
 cYDBCCJtD2XiuhyZNLNxtwE/2/d/1xuIG7T3VRDS6oBtqfOXdsy5xoU9lpbbmZQg
 s1i3cjWgxEYjJOJqONwzfUSu9Zj4GUZfLTx3gtXG7iEiuUfEw3ljEvIrqSqtNxB5
 aTysoOu4MSdJTALHkA7Szhk2Q8Pecmo+NdKLfgMCxAWwIEbn1X9seea7QC5M5/lr
 Q1z150M2+Lcs6z9I/vR+vmPh9YKn1yGV4RbTeMXwiQLlWcRh7vh7jN+YJvrpmJSL
 BGbsRLB02J4i58CLW7n2rgeq5ycO41bJeWdXSSZjJg7KiZMvuD7mnDv1nUoj3Ad0
 lFxgfBRYYZzGCe53xLBXKnjua1lxp8rStUK4iotqkXyhaZqHo0J52okDxSqbP4VZ
 DYMfgyiFDufITd7l7qK5H6OeWQJ2IPtaude0HiMQf00bdOIrIsl+xXCtFHo6kx6z
 VxwFUAUZWIKZT9ZWo2DNmbbDSRmij3Pqm6ZiakDSPT+kZFqvkBo=
 =u5pA
 -----END PGP SIGNATURE-----

Merge 4.19.129 into android-4.19-stable

Changes in 4.19.129
	ipv6: fix IPV6_ADDRFORM operation logic
	net_failover: fixed rollback in net_failover_open()
	bridge: Avoid infinite loop when suppressing NS messages with invalid options
	vxlan: Avoid infinite loop when suppressing NS messages with invalid options
	tun: correct header offsets in napi frags mode
	selftests: bpf: fix use of undeclared RET_IF macro
	make 'user_access_begin()' do 'access_ok()'
	Fix 'acccess_ok()' on alpha and SH
	arch/openrisc: Fix issues with access_ok()
	x86: uaccess: Inhibit speculation past access_ok() in user_access_begin()
	lib: Reduce user_access_begin() boundaries in strncpy_from_user() and strnlen_user()
	btrfs: merge btrfs_find_device and find_device
	btrfs: Detect unbalanced tree with empty leaf before crashing btree operations
	crypto: talitos - fix ECB and CBC algs ivsize
	Input: mms114 - fix handling of mms345l
	ARM: 8977/1: ptrace: Fix mask for thumb breakpoint hook
	sched/fair: Don't NUMA balance for kthreads
	Input: synaptics - add a second working PNP_ID for Lenovo T470s
	drivers/net/ibmvnic: Update VNIC protocol version reporting
	powerpc/xive: Clear the page tables for the ESB IO mapping
	ath9k_htc: Silence undersized packet warnings
	RDMA/uverbs: Make the event_queue fds return POLLERR when disassociated
	x86/cpu/amd: Make erratum #1054 a legacy erratum
	perf probe: Accept the instance number of kretprobe event
	mm: add kvfree_sensitive() for freeing sensitive data objects
	aio: fix async fsync creds
	btrfs: tree-checker: Check level for leaves and nodes
	x86_64: Fix jiffies ODR violation
	x86/PCI: Mark Intel C620 MROMs as having non-compliant BARs
	x86/speculation: Prevent rogue cross-process SSBD shutdown
	x86/reboot/quirks: Add MacBook6,1 reboot quirk
	efi/efivars: Add missing kobject_put() in sysfs entry creation error path
	ALSA: es1688: Add the missed snd_card_free()
	ALSA: hda/realtek - add a pintbl quirk for several Lenovo machines
	ALSA: usb-audio: Fix inconsistent card PM state after resume
	ALSA: usb-audio: Add vendor, product and profile name for HP Thunderbolt Dock
	ACPI: sysfs: Fix reference count leak in acpi_sysfs_add_hotplug_profile()
	ACPI: CPPC: Fix reference count leak in acpi_cppc_processor_probe()
	ACPI: GED: add support for _Exx / _Lxx handler methods
	ACPI: PM: Avoid using power resources if there are none for D0
	cgroup, blkcg: Prepare some symbols for module and !CONFIG_CGROUP usages
	nilfs2: fix null pointer dereference at nilfs_segctor_do_construct()
	spi: dw: Fix controller unregister order
	spi: bcm2835aux: Fix controller unregister order
	spi: bcm-qspi: when tx/rx buffer is NULL set to 0
	PM: runtime: clk: Fix clk_pm_runtime_get() error path
	crypto: cavium/nitrox - Fix 'nitrox_get_first_device()' when ndevlist is fully iterated
	ALSA: pcm: disallow linking stream to itself
	x86/{mce,mm}: Unmap the entire page if the whole page is affected and poisoned
	KVM: x86: Fix APIC page invalidation race
	kvm: x86: Fix L1TF mitigation for shadow MMU
	KVM: x86/mmu: Consolidate "is MMIO SPTE" code
	KVM: x86: only do L1TF workaround on affected processors
	x86/speculation: Change misspelled STIPB to STIBP
	x86/speculation: Add support for STIBP always-on preferred mode
	x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
	x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
	spi: No need to assign dummy value in spi_unregister_controller()
	spi: Fix controller unregister order
	spi: pxa2xx: Fix controller unregister order
	spi: bcm2835: Fix controller unregister order
	spi: pxa2xx: Balance runtime PM enable/disable on error
	spi: pxa2xx: Fix runtime PM ref imbalance on probe error
	crypto: virtio: Fix use-after-free in virtio_crypto_skcipher_finalize_req()
	crypto: virtio: Fix src/dst scatterlist calculation in __virtio_crypto_skcipher_do_req()
	crypto: virtio: Fix dest length calculation in __virtio_crypto_skcipher_do_req()
	selftests/net: in rxtimestamp getopt_long needs terminating null entry
	ovl: initialize error in ovl_copy_xattr
	proc: Use new_inode not new_inode_pseudo
	video: fbdev: w100fb: Fix a potential double free.
	KVM: nSVM: fix condition for filtering async PF
	KVM: nSVM: leave ASID aside in copy_vmcb_control_area
	KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
	KVM: MIPS: Define KVM_ENTRYHI_ASID to cpu_asid_mask(&boot_cpu_data)
	KVM: MIPS: Fix VPN2_MASK definition for variable cpu_vmbits
	KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
	scsi: megaraid_sas: TM command refire leads to controller firmware crash
	ath9k: Fix use-after-free Read in ath9k_wmi_ctrl_rx
	ath9k: Fix use-after-free Write in ath9k_htc_rx_msg
	ath9x: Fix stack-out-of-bounds Write in ath9k_hif_usb_rx_cb
	ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb
	Smack: slab-out-of-bounds in vsscanf
	drm/vkms: Hold gem object while still in-use
	mm/slub: fix a memory leak in sysfs_slab_add()
	fat: don't allow to mount if the FAT length == 0
	perf: Add cond_resched() to task_function_call()
	agp/intel: Reinforce the barrier after GTT updates
	mmc: sdhci-msm: Clear tuning done flag while hs400 tuning
	ARM: dts: at91: sama5d2_ptc_ek: fix sdmmc0 node description
	mmc: sdio: Fix potential NULL pointer error in mmc_sdio_init_card()
	xen/pvcalls-back: test for errors when calling backend_connect()
	KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
	ACPI: GED: use correct trigger type field in _Exx / _Lxx handling
	drm: bridge: adv7511: Extend list of audio sample rates
	crypto: ccp -- don't "select" CONFIG_DMADEVICES
	media: si2157: Better check for running tuner in init
	objtool: Ignore empty alternatives
	spi: pxa2xx: Apply CS clk quirk to BXT
	net: atlantic: make hw_get_regs optional
	net: ena: fix error returning in ena_com_get_hash_function()
	efi/libstub/x86: Work around LLVM ELF quirk build regression
	arm64: cacheflush: Fix KGDB trap detection
	spi: dw: Zero DMA Tx and Rx configurations on stack
	arm64: insn: Fix two bugs in encoding 32-bit logical immediates
	ixgbe: Fix XDP redirect on archs with PAGE_SIZE above 4K
	MIPS: Loongson: Build ATI Radeon GPU driver as module
	Bluetooth: Add SCO fallback for invalid LMP parameters error
	kgdb: Disable WARN_CONSOLE_UNLOCKED for all kgdb
	kgdb: Prevent infinite recursive entries to the debugger
	spi: dw: Enable interrupts in accordance with DMA xfer mode
	clocksource: dw_apb_timer: Make CPU-affiliation being optional
	clocksource: dw_apb_timer_of: Fix missing clockevent timers
	btrfs: do not ignore error from btrfs_next_leaf() when inserting checksums
	ARM: 8978/1: mm: make act_mm() respect THREAD_SIZE
	batman-adv: Revert "disable ethtool link speed detection when auto negotiation off"
	mmc: meson-mx-sdio: trigger a soft reset after a timeout or CRC error
	spi: dw: Fix Rx-only DMA transfers
	x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
	net: vmxnet3: fix possible buffer overflow caused by bad DMA value in vmxnet3_get_rss()
	staging: android: ion: use vmap instead of vm_map_ram
	brcmfmac: fix wrong location to get firmware feature
	tools api fs: Make xxx__mountpoint() more scalable
	e1000: Distribute switch variables for initialization
	dt-bindings: display: mediatek: control dpi pins mode to avoid leakage
	audit: fix a net reference leak in audit_send_reply()
	media: dvb: return -EREMOTEIO on i2c transfer failure.
	media: platform: fcp: Set appropriate DMA parameters
	MIPS: Make sparse_init() using top-down allocation
	Bluetooth: btbcm: Add 2 missing models to subver tables
	audit: fix a net reference leak in audit_list_rules_send()
	netfilter: nft_nat: return EOPNOTSUPP if type or flags are not supported
	selftests/bpf: Fix memory leak in extract_build_id()
	net: bcmgenet: set Rx mode before starting netif
	lib/mpi: Fix 64-bit MIPS build with Clang
	exit: Move preemption fixup up, move blocking operations down
	sched/core: Fix illegal RCU from offline CPUs
	drivers/perf: hisi: Fix typo in events attribute array
	net: lpc-enet: fix error return code in lpc_mii_init()
	media: cec: silence shift wrapping warning in __cec_s_log_addrs()
	net: allwinner: Fix use correct return type for ndo_start_xmit()
	powerpc/spufs: fix copy_to_user while atomic
	xfs: clean up the error handling in xfs_swap_extents
	Crypto/chcr: fix for ccm(aes) failed test
	MIPS: Truncate link address into 32bit for 32bit kernel
	mips: cm: Fix an invalid error code of INTVN_*_ERR
	kgdb: Fix spurious true from in_dbg_master()
	xfs: reset buffer write failure state on successful completion
	xfs: fix duplicate verification from xfs_qm_dqflush()
	platform/x86: intel-vbtn: Use acpi_evaluate_integer()
	platform/x86: intel-vbtn: Split keymap into buttons and switches parts
	platform/x86: intel-vbtn: Do not advertise switches to userspace if they are not there
	platform/x86: intel-vbtn: Also handle tablet-mode switch on "Detachable" and "Portable" chassis-types
	nvme: refine the Qemu Identify CNS quirk
	ath10k: Remove msdu from idr when management pkt send fails
	wcn36xx: Fix error handling path in 'wcn36xx_probe()'
	net: qed*: Reduce RX and TX default ring count when running inside kdump kernel
	mt76: avoid rx reorder buffer overflow
	md: don't flush workqueue unconditionally in md_open
	veth: Adjust hard_start offset on redirect XDP frames
	net/mlx5e: IPoIB, Drop multicast packets that this interface sent
	rtlwifi: Fix a double free in _rtl_usb_tx_urb_setup()
	mwifiex: Fix memory corruption in dump_station
	x86/boot: Correct relocation destination on old linkers
	mips: MAAR: Use more precise address mask
	mips: Add udelay lpj numbers adjustment
	crypto: stm32/crc32 - fix ext4 chksum BUG_ON()
	crypto: stm32/crc32 - fix run-time self test issue.
	crypto: stm32/crc32 - fix multi-instance
	x86/mm: Stop printing BRK addresses
	m68k: mac: Don't call via_flush_cache() on Mac IIfx
	btrfs: qgroup: mark qgroup inconsistent if we're inherting snapshot to a new qgroup
	macvlan: Skip loopback packets in RX handler
	PCI: Don't disable decoding when mmio_always_on is set
	MIPS: Fix IRQ tracing when call handle_fpe() and handle_msa_fpe()
	bcache: fix refcount underflow in bcache_device_free()
	mmc: sdhci-msm: Set SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 quirk
	staging: greybus: sdio: Respect the cmd->busy_timeout from the mmc core
	mmc: via-sdmmc: Respect the cmd->busy_timeout from the mmc core
	ixgbe: fix signed-integer-overflow warning
	mmc: sdhci-esdhc-imx: fix the mask for tuning start point
	spi: dw: Return any value retrieved from the dma_transfer callback
	cpuidle: Fix three reference count leaks
	platform/x86: hp-wmi: Convert simple_strtoul() to kstrtou32()
	platform/x86: intel-hid: Add a quirk to support HP Spectre X2 (2015)
	platform/x86: intel-vbtn: Only blacklist SW_TABLET_MODE on the 9 / "Laptop" chasis-type
	string.h: fix incompatibility between FORTIFY_SOURCE and KASAN
	btrfs: include non-missing as a qualifier for the latest_bdev
	btrfs: send: emit file capabilities after chown
	mm: thp: make the THP mapcount atomic against __split_huge_pmd_locked()
	mm: initialize deferred pages with interrupts enabled
	ima: Fix ima digest hash table key calculation
	ima: Directly assign the ima_default_policy pointer to ima_rules
	evm: Fix possible memory leak in evm_calc_hmac_or_hash()
	ext4: fix EXT_MAX_EXTENT/INDEX to check for zeroed eh_max
	ext4: fix error pointer dereference
	ext4: fix race between ext4_sync_parent() and rename()
	PCI: Avoid Pericom USB controller OHCI/EHCI PME# defect
	PCI: Avoid FLR for AMD Matisse HD Audio & USB 3.0
	PCI: Avoid FLR for AMD Starship USB 3.0
	PCI: Add ACS quirk for iProc PAXB
	PCI: Add ACS quirk for Intel Root Complex Integrated Endpoints
	PCI: Remove unused NFP32xx IDs
	pci:ipmi: Move IPMI PCI class id defines to pci_ids.h
	hwmon/k10temp, x86/amd_nb: Consolidate shared device IDs
	x86/amd_nb: Add PCI device IDs for family 17h, model 30h
	PCI: add USR vendor id and use it in r8169 and w6692 driver
	PCI: Move Synopsys HAPS platform device IDs
	PCI: Move Rohm Vendor ID to generic list
	misc: pci_endpoint_test: Add the layerscape EP device support
	misc: pci_endpoint_test: Add support to test PCI EP in AM654x
	PCI: Add Synopsys endpoint EDDA Device ID
	PCI: Add NVIDIA GPU multi-function power dependencies
	PCI: Enable NVIDIA HDA controllers
	PCI: mediatek: Add controller support for MT7629
	x86/amd_nb: Add PCI device IDs for family 17h, model 70h
	ALSA: lx6464es - add support for LX6464ESe pci express variant
	PCI: Add Genesys Logic, Inc. Vendor ID
	PCI: Add Amazon's Annapurna Labs vendor ID
	PCI: vmd: Add device id for VMD device 8086:9A0B
	x86/amd_nb: Add Family 19h PCI IDs
	PCI: Add Loongson vendor ID
	serial: 8250_pci: Move Pericom IDs to pci_ids.h
	PCI: Make ACS quirk implementations more uniform
	PCI: Unify ACS quirk desired vs provided checking
	PCI: Generalize multi-function power dependency device links
	btrfs: fix error handling when submitting direct I/O bio
	btrfs: fix wrong file range cleanup after an error filling dealloc range
	ima: Call ima_calc_boot_aggregate() in ima_eventdigest_init()
	PCI: Program MPS for RCiEP devices
	e1000e: Disable TSO for buffer overrun workaround
	e1000e: Relax condition to trigger reset for ME workaround
	carl9170: remove P2P_GO support
	media: go7007: fix a miss of snd_card_free
	Bluetooth: hci_bcm: fix freeing not-requested IRQ
	b43legacy: Fix case where channel status is corrupted
	b43: Fix connection problem with WPA3
	b43_legacy: Fix connection problem with WPA3
	media: ov5640: fix use of destroyed mutex
	igb: Report speed and duplex as unknown when device is runtime suspended
	power: vexpress: add suppress_bind_attrs to true
	pinctrl: samsung: Correct setting of eint wakeup mask on s5pv210
	pinctrl: samsung: Save/restore eint_mask over suspend for EINT_TYPE GPIOs
	gnss: sirf: fix error return code in sirf_probe()
	sparc32: fix register window handling in genregs32_[gs]et()
	sparc64: fix misuses of access_process_vm() in genregs32_[sg]et()
	dm crypt: avoid truncating the logical block size
	alpha: fix memory barriers so that they conform to the specification
	kernel/cpu_pm: Fix uninitted local in cpu_pm
	ARM: tegra: Correct PL310 Auxiliary Control Register initialization
	ARM: dts: exynos: Fix GPIO polarity for thr GalaxyS3 CM36651 sensor's bus
	ARM: dts: at91: sama5d2_ptc_ek: fix vbus pin
	ARM: dts: s5pv210: Set keep-power-in-suspend for SDHCI1 on Aries
	drivers/macintosh: Fix memleak in windfarm_pm112 driver
	powerpc/64s: Don't let DT CPU features set FSCR_DSCR
	powerpc/64s: Save FSCR to init_task.thread.fscr after feature init
	kbuild: force to build vmlinux if CONFIG_MODVERSION=y
	sunrpc: svcauth_gss_register_pseudoflavor must reject duplicate registrations.
	sunrpc: clean up properly in gss_mech_unregister()
	mtd: rawnand: brcmnand: fix hamming oob layout
	mtd: rawnand: pasemi: Fix the probe error path
	w1: omap-hdq: cleanup to add missing newline for some dev_dbg
	perf probe: Do not show the skipped events
	perf probe: Fix to check blacklist address correctly
	perf probe: Check address correctness by map instead of _etext
	perf symbols: Fix debuginfo search for Ubuntu
	Linux 4.19.129

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7b1108d90ee1109a28fe488a4358b7a3e101d9c9
commit 4d01d462e6
Author: Greg Kroah-Hartman
Date:   2020-06-22 10:50:54 +02:00

273 files changed, 2387 insertions(+), 1134 deletions(-)


@@ -16,6 +16,9 @@ Required properties:
Documentation/devicetree/bindings/graph.txt. This port should be connected
to the input port of an attached HDMI or LVDS encoder chip.
Optional properties:
- pinctrl-names: Contain "default" and "sleep".
Example:
dpi0: dpi@1401d000 {
@@ -26,6 +29,9 @@ dpi0: dpi@1401d000 {
<&mmsys CLK_MM_DPI_ENGINE>,
<&apmixedsys CLK_APMIXED_TVDPLL>;
clock-names = "pixel", "engine", "pll";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&dpi_pin_func>;
pinctrl-1 = <&dpi_pin_idle>;
port {
dpi0_out: endpoint {


@@ -3999,9 +3999,11 @@ EOI was received.
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
__u32 pad1;
union {
struct {
__u32 msr;
__u32 pad2;
__u64 control;
__u64 evt_page;
__u64 msg_page;
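
The pad1/pad2 fields added above are what keep this user-visible struct identical for 32-bit and 64-bit userspace. A minimal sketch of the layout problem, assuming <linux/types.h> (struct and field names here are illustrative, not part of the ABI):

/* A __u32 followed by a __u64 gets 4 bytes of hidden padding on
 * x86-64 (u64 aligned to 8) but none on i386 (u64 aligned to 4),
 * so every later offset would differ between the two ABIs.
 * Spelling the hole out makes the layout explicit and identical. */
struct hcall_exit_padded {
	__u32 type;     /* offset 0 on every ABI */
	__u32 pad1;     /* the hole, now explicit */
	__u64 control;  /* offset 8 on every ABI */
};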


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 128
SUBLEVEL = 129
EXTRAVERSION =
NAME = "People's Front"
@@ -558,12 +558,8 @@ KBUILD_MODULES :=
KBUILD_BUILTIN := 1
# If we have only "make modules", don't compile built-in objects.
# When we're building modules with modversions, we need to consider
# the built-in objects during the descend as well, in order to
# make sure the checksums are up to date before we record them.
ifeq ($(MAKECMDGOALS),modules)
KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
KBUILD_BUILTIN :=
endif
# If we have "make <whatever> modules", compile modules
@@ -1324,6 +1320,13 @@ ifdef CONFIG_MODULES
all: modules
# When we're building modules with modversions, we need to consider
# the built-in objects during the descend as well, in order to
# make sure the checksums are up to date before we record them.
ifdef CONFIG_MODVERSIONS
KBUILD_BUILTIN := 1
endif
# Build modules
#
# A module can be listed more than once in obj-m resulting in


@@ -327,14 +327,18 @@ static inline int __is_mmio(const volatile void __iomem *addr)
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
extern inline unsigned int ioread16(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
@@ -375,7 +379,9 @@ extern inline void outw(u16 b, unsigned long port)
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -420,14 +426,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
extern inline u8 readb(const volatile void __iomem *addr)
{
u8 ret = __raw_readb(addr);
u8 ret;
mb();
ret = __raw_readb(addr);
mb();
return ret;
}
extern inline u16 readw(const volatile void __iomem *addr)
{
u16 ret = __raw_readw(addr);
u16 ret;
mb();
ret = __raw_readw(addr);
mb();
return ret;
}
@@ -468,14 +478,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
extern inline u32 readl(const volatile void __iomem *addr)
{
u32 ret = __raw_readl(addr);
u32 ret;
mb();
ret = __raw_readl(addr);
mb();
return ret;
}
extern inline u64 readq(const volatile void __iomem *addr)
{
u64 ret = __raw_readq(addr);
u64 ret;
mb();
ret = __raw_readq(addr);
mb();
return ret;
}
@@ -504,14 +518,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define outb_p outb
#define outw_p outw
#define outl_p outl
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)
#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
#define writew_relaxed(b, addr) __raw_writew(b, addr)
#define writel_relaxed(b, addr) __raw_writel(b, addr)
#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
extern u8 readb_relaxed(const volatile void __iomem *addr);
extern u16 readw_relaxed(const volatile void __iomem *addr);
extern u32 readl_relaxed(const volatile void __iomem *addr);
extern u64 readq_relaxed(const volatile void __iomem *addr);
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readb(addr);
}
extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readw(addr);
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readl(addr);
}
extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readq(addr);
}
#endif
#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq
#define mmiowb()


@@ -30,11 +30,13 @@
* Address valid if:
* - "addr" doesn't have any high-bits set
* - AND "size" doesn't have any high-bits set
* - AND "addr+size" doesn't have any high-bits set
* - AND "addr+size-(size != 0)" doesn't have any high-bits set
* - OR we are in kernel mode.
*/
#define __access_ok(addr, size) \
((get_fs().seg & (addr | size | (addr+size))) == 0)
#define __access_ok(addr, size) ({ \
unsigned long __ao_a = (addr), __ao_b = (size); \
unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
(get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })
#define access_ok(type, addr, size) \
({ \
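
A minimal C rendering of the macro above, showing why the end address is addr + size - (size != 0) rather than addr + size (function name is illustrative):

/* A copy of "size" bytes touches addr .. addr + size - 1, so testing
 * addr + size would wrongly reject a range ending exactly at the top
 * of the user segment, and the !!size term keeps a zero-length check
 * from underflowing to addr - 1. */
static int range_ok(unsigned long seg_mask, unsigned long addr,
		    unsigned long size)
{
	unsigned long end = addr + size - (size != 0);

	return (seg_mask & (addr | size | end)) == 0;
}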


@@ -16,21 +16,27 @@
unsigned int
ioread8(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
unsigned int ioread16(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
unsigned int ioread32(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
unsigned int ret;
mb();
ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq);
u8 readb(const volatile void __iomem *addr)
{
u8 ret = __raw_readb(addr);
u8 ret;
mb();
ret = __raw_readb(addr);
mb();
return ret;
}
u16 readw(const volatile void __iomem *addr)
{
u16 ret = __raw_readw(addr);
u16 ret;
mb();
ret = __raw_readw(addr);
mb();
return ret;
}
u32 readl(const volatile void __iomem *addr)
{
u32 ret = __raw_readl(addr);
u32 ret;
mb();
ret = __raw_readl(addr);
mb();
return ret;
}
u64 readq(const volatile void __iomem *addr)
{
u64 ret = __raw_readq(addr);
u64 ret;
mb();
ret = __raw_readq(addr);
mb();
return ret;
}
@@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);
/*
* The _relaxed functions must be ordered w.r.t. each other, but they don't
* have to be ordered w.r.t. other memory accesses.
*/
u8 readb_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readb(addr);
}
u16 readw_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readw(addr);
}
u32 readl_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readl(addr);
}
u64 readq_relaxed(const volatile void __iomem *addr)
{
mb();
return __raw_readq(addr);
}
EXPORT_SYMBOL(readb_relaxed);
EXPORT_SYMBOL(readw_relaxed);
EXPORT_SYMBOL(readl_relaxed);
EXPORT_SYMBOL(readq_relaxed);
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
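
As the comment above says, the _relaxed accessors still order against each other. A sketch of the guarantee a driver relies on (the register names are hypothetical):

/* The mb() issued inside each relaxed read keeps these two device
 * loads in program order on Alpha; the accessors merely give up
 * ordering against ordinary memory accesses. */
static u32 read_after_status(void __iomem *status, void __iomem *data)
{
	(void)readl_relaxed(status);	/* issued to the device first */
	return readl_relaxed(data);	/* ...then the dependent load */
}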


@@ -40,7 +40,7 @@
ahb {
usb0: gadget@300000 {
atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>;
atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usba_vbus>;
status = "okay";
@@ -125,8 +125,6 @@
bus-width = <8>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdmmc0_default>;
non-removable;
mmc-ddr-1_8v;
status = "okay";
};


@@ -50,7 +50,7 @@
i2c_cm36651: i2c-gpio-2 {
compatible = "i2c-gpio";
gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>;
gpios = <&gpf0 0 GPIO_ACTIVE_HIGH>, <&gpf0 1 GPIO_ACTIVE_HIGH>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;


@@ -374,6 +374,7 @@
pinctrl-names = "default";
cap-sd-highspeed;
cap-mmc-highspeed;
keep-power-in-suspend;
mmc-pwrseq = <&wifi_pwrseq>;
non-removable;


@@ -364,4 +364,6 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
#define kvm_arm_vcpu_loaded(vcpu) (false)
#endif /* __ARM_KVM_HOST_H__ */


@@ -229,8 +229,8 @@ static struct undef_hook arm_break_hook = {
};
static struct undef_hook thumb_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xde01,
.instr_mask = 0xffffffff,
.instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,


@@ -112,8 +112,8 @@ static const char * const tegra_dt_board_compat[] = {
};
DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
.l2c_aux_val = 0x3c400001,
.l2c_aux_mask = 0xc20fc3fe,
.l2c_aux_val = 0x3c400000,
.l2c_aux_mask = 0xc20fc3ff,
.smp = smp_ops(tegra_smp_ops),
.map_io = tegra_map_common_io,
.init_early = tegra_init_early,


@@ -5,6 +5,7 @@
* VMA_VM_FLAGS
* VM_EXEC
*/
#include <linux/const.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -30,7 +31,7 @@
* act_mm - get current->active_mm
*/
.macro act_mm, rd
bic \rd, sp, #8128
bic \rd, sp, #(THREAD_SIZE - 1) & ~63
bic \rd, \rd, #63
ldr \rd, [\rd, #TI_TASK]
.if (TSK_ACTIVE_MM > IMM12_MASK)
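
The two BIC instructions above compute sp & ~(THREAD_SIZE - 1) in two steps because an ARM immediate is an 8-bit value rotated by an even amount and cannot encode the whole mask. A C sketch of the arithmetic, with a THREAD_SIZE assumed for illustration:

/* The old literal 8128 == 0x1fc0 hard-coded an 8 KB stack; with any
 * other THREAD_SIZE it cleared the wrong bits and act_mm resolved to
 * the wrong thread_info. */
#define THREAD_SIZE 16384			/* assumed for the example */

static unsigned long thread_info_base(unsigned long sp)
{
	sp &= ~((THREAD_SIZE - 1) & ~63);	/* first BIC  */
	sp &= ~63UL;				/* second BIC */
	return sp;				/* == sp & ~(THREAD_SIZE - 1) */
}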


@@ -95,7 +95,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
* IPI all online CPUs so that they undergo a context synchronization
* event and are forced to refetch the new instructions.
*/
#ifdef CONFIG_KGDB
/*
* KGDB performs cache maintenance with interrupts disabled, so we
* will deadlock trying to IPI the secondary CPUs. In theory, we can
@@ -105,9 +105,9 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
* the patching operation, so we don't need extra IPIs here anyway.
* In which case, add a KGDB-specific bodge and return early.
*/
if (kgdb_connected && irqs_disabled())
if (in_dbg_master())
return;
#endif
kick_all_cpus_sync();
}


@@ -335,8 +335,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
* CP14 and CP15 live in the same array, as they are backed by the
* same system registers.
*/
#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
struct kvm_vm_stat {
ulong remote_tlb_flush;
@@ -535,4 +537,6 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
#define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu)
#endif /* __ARM64_KVM_HOST_H__ */


@@ -1490,16 +1490,10 @@ static u32 aarch64_encode_immediate(u64 imm,
u32 insn)
{
unsigned int immr, imms, n, ones, ror, esz, tmp;
u64 mask = ~0UL;
/* Can't encode full zeroes or full ones */
if (!imm || !~imm)
return AARCH64_BREAK_FAULT;
u64 mask;
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
if (upper_32_bits(imm))
return AARCH64_BREAK_FAULT;
esz = 32;
break;
case AARCH64_INSN_VARIANT_64BIT:
@@ -1511,6 +1505,12 @@ static u32 aarch64_encode_immediate(u64 imm,
return AARCH64_BREAK_FAULT;
}
mask = GENMASK(esz - 1, 0);
/* Can't encode full zeroes, full ones, or value wider than the mask */
if (!imm || imm == mask || imm & ~mask)
return AARCH64_BREAK_FAULT;
/*
* Inverse of Replicate(). Try to spot a repeating pattern
* with a pow2 stride.
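
The reordered check above can be read as a single predicate; for the 32-bit variant it now also rejects values such as 0xffffffff, which the old 64-bit all-ones test let through. A sketch:

/* An AArch64 logical immediate of element size esz must be a
 * non-trivial bit pattern that fits inside GENMASK(esz - 1, 0). */
static bool logical_imm_encodable(u64 imm, unsigned int esz)
{
	u64 mask = GENMASK(esz - 1, 0);		/* 32 -> 0xffffffff */

	return imm != 0 && imm != mask && !(imm & ~mask);
}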


@@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping;
struct irq_desc;
extern void via_l2_flush(int writeback);
extern void via_register_interrupts(void);
extern void via_irq_enable(int);
extern void via_irq_disable(int);


@@ -61,7 +61,6 @@ extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
extern void via_init_clock(irq_handler_t func);
extern void via_flush_cache(void);
extern void oss_init(void);
extern void psc_init(void);
extern void baboon_init(void);
@@ -132,21 +131,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record)
return unknown;
}
/*
* Flip into 24bit mode for an instant - flushes the L2 cache card. We
* have to disable interrupts for this. Our IRQ handlers will crap
* themselves if they take an IRQ in 24bit mode!
*/
static void mac_cache_card_flush(int writeback)
{
unsigned long flags;
local_irq_save(flags);
via_flush_cache();
local_irq_restore(flags);
}
void __init config_mac(void)
{
if (!MACH_IS_MAC)
@@ -178,9 +162,8 @@ void __init config_mac(void)
* not.
*/
if (macintosh_config->ident == MAC_MODEL_IICI
|| macintosh_config->ident == MAC_MODEL_IIFX)
mach_l2_flush = mac_cache_card_flush;
if (macintosh_config->ident == MAC_MODEL_IICI)
mach_l2_flush = via_l2_flush;
}


@@ -289,10 +289,14 @@ void via_debug_dump(void)
* the system into 24-bit mode for an instant.
*/
void via_flush_cache(void)
void via_l2_flush(int writeback)
{
unsigned long flags;
local_irq_save(flags);
via2[gBufB] &= ~VIA2B_vMode32;
via2[gBufB] |= VIA2B_vMode32;
local_irq_restore(flags);
}
/*


@@ -290,12 +290,23 @@ ifdef CONFIG_64BIT
endif
endif
# When linking a 32-bit executable the LLVM linker cannot cope with a
# 32-bit load address that has been sign-extended to 64 bits. Simply
# remove the upper 32 bits then, as it is safe to do so with other
# linkers.
ifdef CONFIG_64BIT
load-ld = $(load-y)
else
load-ld = $(subst 0xffffffff,0x,$(load-y))
endif
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld)
KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
LINKER_LOAD_ADDRESS=$(load-ld) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
PLATFORM="$(platform-y)" \
ITS_INPUTS="$(its-y)"


@@ -90,7 +90,7 @@ ifneq ($(zload-y),)
VMLINUZ_LOAD_ADDRESS := $(zload-y)
else
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
$(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS))
endif
UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)


@@ -250,7 +250,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
CONFIG_DRM_RADEON=m
CONFIG_FB_RADEON=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m


@@ -274,8 +274,12 @@ enum emulation_result {
#define MIPS3_PG_SHIFT 6
#define MIPS3_PG_FRAME 0x3fffffc0
#if defined(CONFIG_64BIT)
#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK 0xffffe000
#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
#endif
#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
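
A worked example of the masks above, assuming cpu_vmbits = 40 (helper name is illustrative):

/* On a 64-bit core with 40 virtual address bits, GENMASK(39, 13) is
 * 0x000000ffffffe000: exactly the VPN2 bits that exist on this CPU,
 * where the old fixed 0xffffe000 stopped at bit 31.  KVM_ENTRYHI_ASID
 * is likewise read from the boot CPU's real ASID mask at run time. */
static unsigned long long vpn2_mask_for(unsigned int vmbits)
{
	return GENMASK_ULL(vmbits - 1, 13);	/* 40 -> 0xffffffe000 */
}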


@@ -749,7 +749,7 @@
/* MAAR bit definitions */
#define MIPS_MAAR_VH (_U64CAST_(1) << 63)
#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12)
#define MIPS_MAAR_ADDR_SHIFT 12
#define MIPS_MAAR_S (_ULCAST_(1) << 1)
#define MIPS_MAAR_VL (_ULCAST_(1) << 0)


@@ -477,20 +477,20 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro __build_clear_fpe
CLI
TRACE_IRQS_OFF
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
.set pop
CLI
TRACE_IRQS_OFF
.endm
.macro __build_clear_msa_fpe
_cfcmsa a1, MSA_CSR
CLI
TRACE_IRQS_OFF
_cfcmsa a1, MSA_CSR
.endm
.macro __build_clear_ade


@@ -123,9 +123,9 @@ static char *cm2_causes[32] = {
"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
"0x08", "0x09", "0x0a", "0x0b",
"0x0c", "0x0d", "0x0e", "0x0f",
"0x10", "0x11", "0x12", "0x13",
"0x14", "0x15", "0x16", "INTVN_WR_ERR",
"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
"0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
"0x14", "0x15", "0x16", "0x17",
"0x18", "0x19", "0x1a", "0x1b",
"0x1c", "0x1d", "0x1e", "0x1f"
};


@@ -933,7 +933,17 @@ static void __init arch_mem_init(char **cmdline_p)
BOOTMEM_DEFAULT);
#endif
device_tree_init();
/*
* In order to reduce the possibility of kernel panic when failed to
* get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
* low memory as small as possible before plat_swiotlb_setup(), so
* make sparse_init() using top-down allocation.
*/
memblock_set_bottom_up(false);
sparse_init();
memblock_set_bottom_up(true);
plat_swiotlb_setup();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));


@@ -22,12 +22,82 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>
#ifdef CONFIG_CPU_FREQ
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
static unsigned long glb_lpj_ref;
static unsigned long glb_lpj_ref_freq;
static int cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
struct cpumask *cpus = freq->policy->cpus;
unsigned long lpj;
int cpu;
/*
* Skip lpj numbers adjustment if the CPU-freq transition is safe for
* the loops delay. (Is this possible?)
*/
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
/* Save the initial values of the lpjes for future scaling. */
if (!glb_lpj_ref) {
glb_lpj_ref = boot_cpu_data.udelay_val;
glb_lpj_ref_freq = freq->old;
for_each_online_cpu(cpu) {
per_cpu(pcp_lpj_ref, cpu) =
cpu_data[cpu].udelay_val;
per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
}
}
/*
* Adjust global lpj variable and per-CPU udelay_val number in
* accordance with the new CPU frequency.
*/
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
glb_lpj_ref_freq,
freq->new);
for_each_cpu(cpu, cpus) {
lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
per_cpu(pcp_lpj_ref_freq, cpu),
freq->new);
cpu_data[cpu].udelay_val = (unsigned int)lpj;
}
}
return NOTIFY_OK;
}
static struct notifier_block cpufreq_notifier = {
.notifier_call = cpufreq_callback,
};
static int __init register_cpufreq_notifier(void)
{
return cpufreq_register_notifier(&cpufreq_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);
#endif /* CONFIG_CPU_FREQ */
/*
* forward reference
*/
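
The PRECHANGE/POSTCHANGE asymmetry in cpufreq_callback() above is deliberate: lpj is raised before a frequency increase but lowered only after a decrease, so in the transition window udelay() can only run long, never short. The rescale itself is linear, as in this sketch:

/* What cpufreq_scale() computes for udelay_val; e.g. a reference lpj
 * of 4980736 calibrated at 1000000 kHz becomes 2490368 at 500000 kHz.
 * The in-kernel helper additionally guards the multiply against
 * overflow. */
static unsigned long lpj_rescale(unsigned long lpj_ref,
				 unsigned long ref_khz,
				 unsigned long new_khz)
{
	return lpj_ref * new_khz / ref_khz;
}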


@@ -50,7 +50,7 @@ SECTIONS
/* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
. = VMLINUX_LOAD_ADDRESS;
. = LINKER_LOAD_ADDRESS;
/* read-only */
_text = .; /* Text and read-only data */
.text : {


@@ -58,8 +58,12 @@
/* Ensure that addr is below task's addr_limit */
#define __addr_ok(addr) ((unsigned long) addr < get_fs())
#define access_ok(type, addr, size) \
__range_ok((unsigned long)addr, (unsigned long)size)
#define access_ok(type, addr, size) \
({ \
unsigned long __ao_addr = (unsigned long)(addr); \
unsigned long __ao_size = (unsigned long)(size); \
__range_ok(__ao_addr, __ao_size); \
})
/*
* These are the main single-value transfer routines. They automatically


@@ -346,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
u64 lpcr;
/*
* Linux relies on FSCR[DSCR] being clear, so that we can take the
* facility unavailable interrupt and track the task's usage of DSCR.
* See facility_unavailable_exception().
* Clear the bit here so that feat_enable() doesn't set it.
*/
f->fscr_bit_nr = -1;
feat_enable(f);
lpcr = mfspr(SPRN_LPCR);


@@ -685,6 +685,23 @@ static void __init tm_init(void)
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC64
static void __init save_fscr_to_task(void)
{
/*
* Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
* have configured via the device tree features or via __init_FSCR().
* That value will then be propagated to pid 1 (init) and all future
* processes.
*/
if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
static inline void save_fscr_to_task(void) {};
#endif
void __init early_init_devtree(void *params)
{
phys_addr_t limit;
@@ -770,6 +787,8 @@ void __init early_init_devtree(void *params)
BUG();
}
save_fscr_to_task();
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
/* We'll later wait for secondaries to check in; there are
* NCPUS-1 non-boot CPUs :-)


@@ -1991,8 +1991,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
int ret;
struct spu_context *ctx = file->private_data;
u32 stat, data;
int ret;
if (!access_ok(VERIFY_WRITE, buf, len))
return -EFAULT;
@@ -2001,11 +2002,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_mbox_info_read(ctx, buf, len, pos);
stat = ctx->csa.prob.mb_stat_R;
data = ctx->csa.prob.pu_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
/* EOF if there's no entry in the mbox */
if (!(stat & 0x0000ff))
return 0;
return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_mbox_info_fops = {
@@ -2032,6 +2038,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
u32 stat, data;
int ret;
if (!access_ok(VERIFY_WRITE, buf, len))
@@ -2041,11 +2048,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_ibox_info_read(ctx, buf, len, pos);
stat = ctx->csa.prob.mb_stat_R;
data = ctx->csa.priv2.puint_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
/* EOF if there's no entry in the ibox */
if (!(stat & 0xff0000))
return 0;
return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_ibox_info_fops = {
@@ -2054,6 +2066,11 @@ static const struct file_operations spufs_ibox_info_fops = {
.llseek = generic_file_llseek,
};
static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
{
return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
}
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
@@ -2062,7 +2079,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
u32 wbox_stat;
wbox_stat = ctx->csa.prob.mb_stat_R;
cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
cnt = spufs_wbox_info_cnt(ctx);
for (i = 0; i < cnt; i++) {
data[i] = ctx->csa.spu_mailbox_data[i];
}
@@ -2075,7 +2092,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
int ret;
u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
int ret, count;
if (!access_ok(VERIFY_WRITE, buf, len))
return -EFAULT;
@@ -2084,11 +2102,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_wbox_info_read(ctx, buf, len, pos);
count = spufs_wbox_info_cnt(ctx);
memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
return simple_read_from_buffer(buf, len, pos, &data,
count * sizeof(u32));
}
static const struct file_operations spufs_wbox_info_fops = {
@@ -2097,27 +2117,33 @@ static const struct file_operations spufs_wbox_info_fops = {
.llseek = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
static void spufs_get_dma_info(struct spu_context *ctx,
struct spu_dma_info *info)
{
struct spu_dma_info info;
struct mfc_cq_sr *qp, *spuqp;
int i;
info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
for (i = 0; i < 16; i++) {
qp = &info.dma_info_command_data[i];
spuqp = &ctx->csa.priv2.spuq[i];
struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
}
}
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
struct spu_dma_info info;
spufs_get_dma_info(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2127,6 +2153,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
struct spu_dma_info info;
int ret;
if (!access_ok(VERIFY_WRITE, buf, len))
@@ -2136,11 +2163,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_dma_info_read(ctx, buf, len, pos);
spufs_get_dma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
return simple_read_from_buffer(buf, len, pos, &info,
sizeof(info));
}
static const struct file_operations spufs_dma_info_fops = {
@@ -2149,13 +2177,31 @@ static const struct file_operations spufs_dma_info_fops = {
.llseek = no_llseek,
};
static void spufs_get_proxydma_info(struct spu_context *ctx,
struct spu_proxydma_info *info)
{
int i;
info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
for (i = 0; i < 8; i++) {
struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
}
}
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
struct spu_proxydma_info info;
struct mfc_cq_sr *qp, *puqp;
int ret = sizeof info;
int i;
if (len < ret)
return -EINVAL;
@@ -2163,18 +2209,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
if (!access_ok(VERIFY_WRITE, buf, len))
return -EFAULT;
info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
for (i = 0; i < 8; i++) {
qp = &info.proxydma_info_command_data[i];
puqp = &ctx->csa.priv2.puq[i];
qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
}
spufs_get_proxydma_info(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2184,17 +2219,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
struct spu_proxydma_info info;
int ret;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
spufs_get_proxydma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
return ret;
return simple_read_from_buffer(buf, len, pos, &info,
sizeof(info));
}
static const struct file_operations spufs_proxydma_info_fops = {
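
Every spufs *_info_read conversion above follows the same shape, which is the point of the "copy_to_user while atomic" fix: snapshot the saved register state under the spinlock, then do the faultable copy to userspace after dropping it. A generic sketch of that pattern (names and the 16-word bound are assumptions):

/* copy_to_user() may fault and sleep, so it must not run inside
 * spin_lock(); snapshot first, copy afterwards. */
static ssize_t read_locked_state(char __user *buf, size_t len, loff_t *pos,
				 spinlock_t *lock, const u32 *regs, size_t n)
{
	u32 snap[16];				/* assumes n <= 16 words */

	spin_lock(lock);
	memcpy(snap, regs, n * sizeof(u32));	/* safe in atomic context */
	spin_unlock(lock);

	return simple_read_from_buffer(buf, len, pos, snap, n * sizeof(u32));
}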


@@ -171,7 +171,6 @@ static void pnv_smp_cpu_kill_self(void)
/* Standard hot unplug procedure */
idle_task_exit();
current->active_mm = NULL; /* for sanity */
cpu = smp_processor_id();
DBG("CPU%d offline\n", cpu);
generic_set_cpu_dead(cpu);


@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -933,12 +934,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
if (xd->eoi_mmio) {
unmap_kernel_range((unsigned long)xd->eoi_mmio,
1u << xd->esb_shift);
iounmap(xd->eoi_mmio);
if (xd->eoi_mmio == xd->trig_mmio)
xd->trig_mmio = NULL;
xd->eoi_mmio = NULL;
}
if (xd->trig_mmio) {
unmap_kernel_range((unsigned long)xd->trig_mmio,
1u << xd->esb_shift);
iounmap(xd->trig_mmio);
xd->trig_mmio = NULL;
}


@@ -16,8 +16,11 @@
* sum := addr + size; carry? --> flag = true;
* if (sum >= addr_limit) flag = true;
*/
#define __access_ok(addr, size) \
(__addr_ok((addr) + (size)))
#define __access_ok(addr, size) ({ \
unsigned long __ao_a = (addr), __ao_b = (size); \
unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
__ao_end >= __ao_a && __addr_ok(__ao_end); })
#define access_ok(type, addr, size) \
(__chk_user_ptr(addr), \
__access_ok((unsigned long __force)(addr), (size)))


@@ -46,82 +46,79 @@ enum sparc_regset {
REGSET_FP,
};
static int regwindow32_get(struct task_struct *target,
const struct pt_regs *regs,
u32 *uregs)
{
unsigned long reg_window = regs->u_regs[UREG_I6];
int size = 16 * sizeof(u32);
if (target == current) {
if (copy_from_user(uregs, (void __user *)reg_window, size))
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
FOLL_FORCE) != size)
return -EFAULT;
}
return 0;
}
static int regwindow32_set(struct task_struct *target,
const struct pt_regs *regs,
u32 *uregs)
{
unsigned long reg_window = regs->u_regs[UREG_I6];
int size = 16 * sizeof(u32);
if (target == current) {
if (copy_to_user((void __user *)reg_window, uregs, size))
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
FOLL_FORCE | FOLL_WRITE) != size)
return -EFAULT;
}
return 0;
}
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = target->thread.kregs;
unsigned long __user *reg_window;
unsigned long *k = kbuf;
unsigned long __user *u = ubuf;
unsigned long reg;
u32 uregs[16];
int ret;
if (target == current)
flush_user_windows();
pos /= sizeof(reg);
count /= sizeof(reg);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->u_regs,
0, 16 * sizeof(u32));
if (ret || !count)
return ret;
if (kbuf) {
for (; count > 0 && pos < 16; count--)
*k++ = regs->u_regs[pos++];
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
return -EFAULT;
}
} else {
for (; count > 0 && pos < 16; count--) {
if (put_user(regs->u_regs[pos++], u++))
return -EFAULT;
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
put_user(reg, u++))
return -EFAULT;
}
}
while (count > 0) {
switch (pos) {
case 32: /* PSR */
reg = regs->psr;
break;
case 33: /* PC */
reg = regs->pc;
break;
case 34: /* NPC */
reg = regs->npc;
break;
case 35: /* Y */
reg = regs->y;
break;
case 36: /* WIM */
case 37: /* TBR */
reg = 0;
break;
default:
goto finish;
}
if (kbuf)
*k++ = reg;
else if (put_user(reg, u++))
if (pos < 32 * sizeof(u32)) {
if (regwindow32_get(target, regs, uregs))
return -EFAULT;
pos++;
count--;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
uregs,
16 * sizeof(u32), 32 * sizeof(u32));
if (ret || !count)
return ret;
}
finish:
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
38 * sizeof(reg), -1);
uregs[0] = regs->psr;
uregs[1] = regs->pc;
uregs[2] = regs->npc;
uregs[3] = regs->y;
uregs[4] = 0; /* WIM */
uregs[5] = 0; /* TBR */
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
uregs,
32 * sizeof(u32), 38 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
@@ -130,82 +127,53 @@ static int genregs32_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
unsigned long __user *reg_window;
const unsigned long *k = kbuf;
const unsigned long __user *u = ubuf;
unsigned long reg;
u32 uregs[16];
u32 psr;
int ret;
if (target == current)
flush_user_windows();
pos /= sizeof(reg);
count /= sizeof(reg);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->u_regs,
0, 16 * sizeof(u32));
if (ret || !count)
return ret;
if (kbuf) {
for (; count > 0 && pos < 16; count--)
regs->u_regs[pos++] = *k++;
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
return -EFAULT;
}
} else {
for (; count > 0 && pos < 16; count--) {
if (get_user(reg, u++))
return -EFAULT;
regs->u_regs[pos++] = reg;
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
put_user(reg, &reg_window[pos++]))
return -EFAULT;
}
}
while (count > 0) {
unsigned long psr;
if (kbuf)
reg = *k++;
else if (get_user(reg, u++))
if (pos < 32 * sizeof(u32)) {
if (regwindow32_get(target, regs, uregs))
return -EFAULT;
switch (pos) {
case 32: /* PSR */
psr = regs->psr;
psr &= ~(PSR_ICC | PSR_SYSCALL);
psr |= (reg & (PSR_ICC | PSR_SYSCALL));
regs->psr = psr;
break;
case 33: /* PC */
regs->pc = reg;
break;
case 34: /* NPC */
regs->npc = reg;
break;
case 35: /* Y */
regs->y = reg;
break;
case 36: /* WIM */
case 37: /* TBR */
break;
default:
goto finish;
}
pos++;
count--;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
uregs,
16 * sizeof(u32), 32 * sizeof(u32));
if (ret)
return ret;
if (regwindow32_set(target, regs, uregs))
return -EFAULT;
if (!count)
return 0;
}
finish:
pos *= sizeof(reg);
count *= sizeof(reg);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&psr,
32 * sizeof(u32), 33 * sizeof(u32));
if (ret)
return ret;
regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
(psr & (PSR_ICC | PSR_SYSCALL));
if (!count)
return 0;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
33 * sizeof(u32), 34 * sizeof(u32));
if (ret || !count)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->y,
34 * sizeof(u32), 35 * sizeof(u32));
if (ret || !count)
return ret;
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
38 * sizeof(reg), -1);
35 * sizeof(u32), 38 * sizeof(u32));
}
static int fpregs32_get(struct task_struct *target,


@@ -571,19 +571,13 @@ static int genregs32_get(struct task_struct *target,
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
&reg_window[pos++],
&reg, sizeof(reg),
FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
if (access_process_vm(target,
(unsigned long) u,
&reg, sizeof(reg),
FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
if (put_user(reg, u++))
return -EFAULT;
pos++;
u++;
}
}
}
@@ -683,12 +677,7 @@ static int genregs32_set(struct task_struct *target,
}
} else {
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
u,
&reg, sizeof(reg),
FOLL_FORCE)
!= sizeof(reg))
if (get_user(reg, u++))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
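
The sparc64 hunks above drop access_process_vm() for the u pointer because it is the tracer's own buffer; only the register window lives in the tracee's address space. A sketch of the corrected split (helper name is illustrative):

/* "u" is in current's (the tracer's) mm, so get_user()/put_user()
 * are the right tools; access_process_vm() is needed only for the
 * register window, which sits on the tracee's stack. */
static int fetch_window_word(struct task_struct *tracee,
			     unsigned long win_addr, u32 __user *u)
{
	u32 reg;

	if (access_process_vm(tracee, win_addr, &reg, sizeof(reg),
			      FOLL_FORCE) != sizeof(reg))
		return -EFAULT;
	return put_user(reg, u);	/* tracer's own address space */
}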


@@ -49,16 +49,17 @@
* Position Independent Executable (PIE) so that linker won't optimize
* R_386_GOT32X relocation to its fixed symbol address. Older
* linkers generate R_386_32 relocations against locally defined symbols,
* _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
* _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
* optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
* R_386_32 relocations when relocating the kernel. To generate
* R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
* R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
* hidden:
*/
.hidden _bss
.hidden _ebss
.hidden _got
.hidden _egot
.hidden _end
__HEAD
ENTRY(startup_32)


@@ -42,6 +42,7 @@
.hidden _ebss
.hidden _got
.hidden _egot
.hidden _end
__HEAD
.code32


@@ -291,6 +291,7 @@
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */


@@ -232,6 +232,7 @@ enum spectre_v2_mitigation {
enum spectre_v2_user_mitigation {
SPECTRE_V2_USER_NONE,
SPECTRE_V2_USER_STRICT,
SPECTRE_V2_USER_STRICT_PREFERRED,
SPECTRE_V2_USER_PRCTL,
SPECTRE_V2_USER_SECCOMP,
};


@@ -90,28 +90,35 @@ void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#ifdef CONFIG_X86_64
static inline int set_mce_nospec(unsigned long pfn)
/*
* Prevent speculative access to the page by either unmapping
* it (if we do not require access to any part of the page) or
* marking it uncacheable (if we want to try to retrieve data
* from non-poisoned lines in the page).
*/
static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
unsigned long decoy_addr;
int rc;
/*
* Mark the linear address as UC to make sure we don't log more
* errors because of speculative access to the page.
* We would like to just call:
* set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
* set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
* but doing that would radically increase the odds of a
* speculative access to the poison page because we'd have
* the virtual address of the kernel 1:1 mapping sitting
* around in registers.
* Instead we get tricky. We create a non-canonical address
* that looks just like the one we want, but has bit 63 flipped.
* This relies on set_memory_uc() properly sanitizing any __pa()
* This relies on set_memory_XX() properly sanitizing any __pa()
* results with __PHYSICAL_MASK or PTE_PFN_MASK.
*/
decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
rc = set_memory_uc(decoy_addr, 1);
if (unmap)
rc = set_memory_np(decoy_addr, 1);
else
rc = set_memory_uc(decoy_addr, 1);
if (rc)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;
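
A worked example of the decoy arithmetic described in the comment above, assuming PAGE_OFFSET = 0xffff888000000000:

/* For pfn 0x12345:
 *   canonical = 0xffff888012345000
 *   decoy     = canonical ^ BIT(63) = 0x7fff888012345000
 * The decoy is non-canonical, so no usable pointer to the poisoned
 * page lingers in registers, yet set_memory_np()/set_memory_uc()
 * mask it with __PHYSICAL_MASK and still patch the right PTE. */
static unsigned long mce_decoy_addr(unsigned long pfn)
{
	return (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
}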


@@ -717,7 +717,7 @@ static __must_check inline bool user_access_begin(int type,
{
if (unlikely(!access_ok(type, ptr, len)))
return 0;
__uaccess_begin();
__uaccess_begin_nospec();
return 1;
}


@@ -11,14 +11,17 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -28,9 +31,11 @@ static u32 *flush_words;
static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
{}
};
#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
const struct pci_device_id amd_nb_misc_ids[] = {
@@ -44,7 +49,10 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@ -57,6 +65,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};

View File

@ -1122,8 +1122,7 @@ static const int amd_erratum_383[] =
/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{

View File

@ -61,7 +61,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
/* Control conditional STIPB in switch_to() */
/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
@ -581,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
@ -633,10 +635,11 @@ enum spectre_v2_user_cmd {
};
static const char * const spectre_v2_user_strings[] = {
[SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
[SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
[SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
[SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
[SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
[SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
[SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
[SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
[SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
};
static const struct {
@ -748,23 +751,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
spectre_v2_user_ibpb = mode;
}
/* If enhanced IBRS is enabled no STIPB required */
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
/*
* If enhanced IBRS is enabled or SMT impossible, STIBP is not
* required.
*/
if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;
/*
* If SMT is not possible or STIBP is not available clear the STIPB
* mode.
* At this point, an STIBP mode other than "off" has been set.
* If STIBP support is not being forced, check if STIBP always-on
* is preferred.
*/
if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
if (mode != SPECTRE_V2_USER_STRICT &&
boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
mode = SPECTRE_V2_USER_STRICT_PREFERRED;
/*
* If STIBP is not available, clear the STIBP mode.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP))
mode = SPECTRE_V2_USER_NONE;
spectre_v2_user_stibp = mode;
set_mode:
spectre_v2_user = mode;
/* Only print the STIBP mode when SMT possible */
if (smt_possible)
pr_info("%s\n", spectre_v2_user_strings[mode]);
pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
@ -995,10 +1011,11 @@ void arch_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);
switch (spectre_v2_user) {
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
case SPECTRE_V2_USER_STRICT_PREFERRED:
update_stibp_strict();
break;
case SPECTRE_V2_USER_PRCTL:
@ -1227,13 +1244,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
case PR_SPEC_ENABLE:
if (spectre_v2_user == SPECTRE_V2_USER_NONE)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
/*
* Indirect branch speculation is always disabled in strict
* mode.
* mode. It can neither be enabled if it was force-disabled
* by a previous prctl call.
*/
if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
task_spec_ib_force_disable(task))
return -EPERM;
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
@ -1244,9 +1267,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always allowed when
* mitigation is force disabled.
*/
if (spectre_v2_user == SPECTRE_V2_USER_NONE)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return 0;
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
@ -1277,7 +1303,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@ -1306,21 +1333,24 @@ static int ib_prctl_get(struct task_struct *task)
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return PR_SPEC_NOT_AFFECTED;
switch (spectre_v2_user) {
case SPECTRE_V2_USER_NONE:
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return PR_SPEC_DISABLE;
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
case SPECTRE_V2_USER_STRICT:
return PR_SPEC_DISABLE;
default:
} else
return PR_SPEC_NOT_AFFECTED;
}
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@ -1559,11 +1589,13 @@ static char *stibp_state(void)
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return "";
switch (spectre_v2_user) {
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
return ", STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
return ", STIBP: forced";
case SPECTRE_V2_USER_STRICT_PREFERRED:
return ", STIBP: always-on";
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
if (static_key_enabled(&switch_to_cond_stibp))
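
The spectre_v2_user_ibpb/spectre_v2_user_stibp split above backs the per-task speculation interface; here is a small illustrative userspace program (not part of this diff) that exercises it through prctl(), with constants from <linux/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        /* Ask to disable indirect-branch speculation for this task. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        /* Reports PR_SPEC_* flags, e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE. */
        printf("ib state = %d\n",
               prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                     0, 0, 0));
        return 0;
}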

View File

@ -535,6 +535,13 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
static bool whole_page(struct mce *m)
{
if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
return true;
return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
}
bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
@ -600,7 +607,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
pfn = mce->addr >> PAGE_SHIFT;
if (!memory_failure(pfn, 0))
set_mce_nospec(pfn);
set_mce_nospec(pfn, whole_page(mce));
}
return NOTIFY_OK;
@ -1101,7 +1108,7 @@ static int do_memory_failure(struct mce *m)
if (ret)
pr_err("Memory error not recovered");
else
set_mce_nospec(m->addr >> PAGE_SHIFT);
set_mce_nospec(m->addr >> PAGE_SHIFT, whole_page(m));
return ret;
}
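
The new whole_page() test reads the MCi_MISC address-LSB field, which encodes the granularity of the recorded error address. A standalone illustration (the EX_/ex_ names are assumptions; the 0x3f mask mirrors the kernel's MCI_MISC_ADDR_LSB() definition):

#include <stdbool.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_MISC_ADDR_LSB(m) ((m) & 0x3f)

static bool ex_whole_page(unsigned long long misc)
{
        /*
         * LSB >= page size: the whole page is gone, unmap it.
         * Smaller: only a cache line is poisoned, keep the page UC so
         * clean lines can still be read.
         */
        return EX_MISC_ADDR_LSB(misc) >= EX_PAGE_SHIFT;
}

int main(void)
{
        printf("lsb=6:  whole_page=%d (mark UC, salvage clean lines)\n",
               ex_whole_page(6));
        printf("lsb=12: whole_page=%d (unmap the page)\n",
               ex_whole_page(12));
        return 0;
}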

View File

@ -413,28 +413,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
lockdep_assert_irqs_disabled();
/*
* If TIF_SSBD is different, select the proper mitigation
* method. Note that if SSBD mitigation is disabled or permanentely
* enabled this branch can't be taken because nothing can set
* TIF_SSBD.
*/
if (tif_diff & _TIF_SSBD) {
if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
/* Handle change of TIF_SSBD depending on the mitigation method. */
if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
if (tif_diff & _TIF_SSBD)
amd_set_ssb_virt_state(tifn);
} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
if (tif_diff & _TIF_SSBD)
amd_set_core_ssb_state(tifn);
} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
msr |= ssbd_tif_to_spec_ctrl(tifn);
updmsr = true;
}
} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
updmsr |= !!(tif_diff & _TIF_SSBD);
msr |= ssbd_tif_to_spec_ctrl(tifn);
}
/*
* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
* otherwise avoid the MSR write.
*/
/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
if (IS_ENABLED(CONFIG_SMP) &&
static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);

View File

@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev,
if (IS_ENABLED(CONFIG_SMP)) {
/*
* Avoid __switch_to_xtra() invocation when conditional
* STIPB is disabled and the only different bit is
* STIBP is disabled and the only different bit is
* TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
* in the TIF_WORK_CTXSW masks.
*/

View File

@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
{ /* Handle problems with rebooting on Apple MacBook6,1 */
.callback = set_pci_reboot,
.ident = "Apple MacBook6,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
},
},
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",

View File

@ -294,11 +294,18 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
BUG_ON((mmio_mask & mmio_value) != mmio_value);
WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
static bool is_mmio_spte(u64 spte)
{
return (spte & shadow_mmio_mask) == shadow_mmio_value;
}
static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
return sp->role.ad_disabled;
@ -306,7 +313,7 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
static inline bool spte_ad_enabled(u64 spte)
{
MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
MMU_WARN_ON(is_mmio_spte(spte));
return !(spte & shadow_acc_track_value);
}
@ -317,13 +324,13 @@ static bool is_nx_huge_page_enabled(void)
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
MMU_WARN_ON(is_mmio_spte(spte));
return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}
static inline u64 spte_shadow_dirty_mask(u64 spte)
{
MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
MMU_WARN_ON(is_mmio_spte(spte));
return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}
@ -393,11 +400,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
mmu_spte_set(sptep, mask);
}
static bool is_mmio_spte(u64 spte)
{
return (spte & shadow_mmio_mask) == shadow_mmio_value;
}
static gfn_t get_mmio_spte_gfn(u64 spte)
{
u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
@ -479,16 +481,23 @@ static void kvm_mmu_reset_all_pte_masks(void)
* If the CPU has 46 or less physical address bits, then set an
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
*
* Some Intel CPUs address the L1 cache using more PA bits than are
* reported by CPUID. Use the PA width of the L1 cache when possible
* to achieve more effective mitigation, e.g. if system RAM overlaps
* the most significant bits of legal physical address space.
*/
shadow_nonpresent_or_rsvd_mask = 0;
low_phys_bits = boot_cpu_data.x86_phys_bits;
if (boot_cpu_data.x86_phys_bits <
52 - shadow_nonpresent_or_rsvd_mask_len) {
if (boot_cpu_has_bug(X86_BUG_L1TF) &&
!WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
52 - shadow_nonpresent_or_rsvd_mask_len)) {
low_phys_bits = boot_cpu_data.x86_cache_bits
- shadow_nonpresent_or_rsvd_mask_len;
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(boot_cpu_data.x86_phys_bits -
shadow_nonpresent_or_rsvd_mask_len,
boot_cpu_data.x86_phys_bits - 1);
low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
}
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
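
To make the new mask arithmetic concrete, a standalone worked example (macros copied in for illustration only): with x86_cache_bits = 46 and the 5-bit shadow_nonpresent_or_rsvd_mask_len, the reserved SPTE bits are 41..45 and the lower-GFN mask covers bits 12..40.

#include <stdio.h>

#define EX_GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define ex_rsvd_bits(s, e)   EX_GENMASK_ULL((e), (s))
#define EX_PAGE_SHIFT        12

int main(void)
{
        unsigned int cache_bits = 46, mask_len = 5;
        unsigned int low_phys_bits = cache_bits - mask_len; /* 41 */

        printf("rsvd mask      = %#llx\n",
               ex_rsvd_bits(low_phys_bits, cache_bits - 1)); /* bits 41..45 */
        printf("lower gfn mask = %#llx\n",
               EX_GENMASK_ULL(low_phys_bits - 1, EX_PAGE_SHIFT)); /* 12..40 */
        return 0;
}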

View File

@ -3229,8 +3229,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
return NESTED_EXIT_HOST;
break;
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
/* When we're shadowing, trap PFs, but not async PF */
if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
/* Trap async PF even if not shadowing */
if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
return NESTED_EXIT_HOST;
break;
default:
@ -3319,7 +3319,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
dst->iopm_base_pa = from->iopm_base_pa;
dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset;
dst->asid = from->asid;
/* asid not copied, it is handled manually for svm->vmcb. */
dst->tlb_ctl = from->tlb_ctl;
dst->int_ctl = from->int_ctl;
dst->int_vector = from->int_vector;

View File

@ -9683,7 +9683,7 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
switch (exit_reason) {
switch ((u16)exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (is_nmi(intr_info))
return false;

View File

@ -7525,9 +7525,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end,
bool blockable)
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end)
{
unsigned long apic_address;
@ -7538,8 +7537,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (start <= apic_address && apic_address < end)
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
return 0;
}
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)

View File

@ -120,8 +120,6 @@ __ref void *alloc_low_pages(unsigned int num)
} else {
pfn = pgt_buf_end;
pgt_buf_end += num;
printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
}
for (i = 0; i < num; i++) {

View File

@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
/*
* Device [1022:7808]

View File

@ -869,6 +869,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
"acpi_cppc");
if (ret) {
per_cpu(cpc_desc_ptr, pr->id) = NULL;
kobject_put(&cpc_ptr->kobj);
goto out_free;
}
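
This fix, like the sysfs, cpuidle and efivars ones further down, applies the same rule: once kobject_init_and_add() has run, the refcount is live, so the error path must drop it with kobject_put(), which ends up in the ktype's ->release(), rather than freeing or leaking the object. A minimal in-kernel sketch with hypothetical ex_* names:

#include <linux/kobject.h>
#include <linux/slab.h>

struct ex_obj {
        struct kobject kobj;
};

static void ex_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct ex_obj, kobj));
}

static struct kobj_type ex_ktype = {
        .release = ex_release,
};

static int ex_create(struct kobject *parent)
{
        struct ex_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        int ret;

        if (!obj)
                return -ENOMEM;
        ret = kobject_init_and_add(&obj->kobj, &ex_ktype, parent, "example");
        if (ret) {
                /* Refcount is live: put it, don't kfree() it directly. */
                kobject_put(&obj->kobj);
                return ret;
        }
        kobject_uevent(&obj->kobj, KOBJ_ADD);
        return 0;
}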

View File

@ -172,7 +172,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
/* If _PR3 is not available, use D3hot as the target state. */
/* If D3cold is not supported, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {

View File

@ -88,6 +88,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
struct resource r;
struct acpi_resource_irq *p = &ares->data.irq;
struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
char ev_name[5];
u8 trigger;
if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
return AE_OK;
@ -96,14 +98,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
dev_err(dev, "unable to parse IRQ resource\n");
return AE_ERROR;
}
if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
if (ares->type == ACPI_RESOURCE_TYPE_IRQ) {
gsi = p->interrupts[0];
else
trigger = p->triggering;
} else {
gsi = pext->interrupts[0];
trigger = pext->triggering;
}
irq = r.start;
if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
switch (gsi) {
case 0 ... 255:
sprintf(ev_name, "_%c%02hhX",
trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
break;
/* fall through */
default:
if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
break;
dev_err(dev, "cannot locate _EVT method\n");
return AE_ERROR;
}
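
The _Exx/_Lxx name construction above, shown standalone: GSIs 0..255 map to an edge ('E') or level ('L') handler method named with the GSI as two uppercase hex digits, and anything else falls back to _EVT.

#include <stdio.h>

int main(void)
{
        char ev_name[5];
        unsigned int gsi = 0x23; /* example GSI */
        int edge = 1;            /* stands in for ACPI_EDGE_SENSITIVE */

        snprintf(ev_name, sizeof(ev_name), "_%c%02X",
                 edge ? 'E' : 'L', gsi & 0xff);
        printf("%s\n", ev_name); /* prints "_E23" */
        return 0;
}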

View File

@ -921,12 +921,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
if (buffer.length && package
&& package->type == ACPI_TYPE_PACKAGE
&& package->package.count) {
int err = acpi_extract_power_resources(package, 0,
&ps->resources);
if (!err)
device->power.flags.power_resources = 1;
}
&& package->package.count)
acpi_extract_power_resources(package, 0, &ps->resources);
ACPI_FREE(buffer.pointer);
}
@ -973,14 +970,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_init_power_state(device, i);
INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
/* Set defaults for D0 and D3hot states (always valid) */
/* Set the defaults for D0 and D3hot (always supported). */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
/*
* Use power resources only if the D0 list of them is populated, because
* some platforms may provide _PR3 only to indicate D3cold support and
* in those cases the power resources list returned by it may be bogus.
*/
if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
device->power.flags.power_resources = 1;
/*
* D3cold is supported if the D3hot list of power resources is
* not empty.
*/
if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
}
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
}

View File

@ -990,8 +990,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
error = kobject_init_and_add(&hotplug->kobj,
&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
if (error)
if (error) {
kobject_put(&hotplug->kobj);
goto err_out;
}
kobject_uevent(&hotplug->kobj, KOBJ_ADD);
return;

View File

@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x4606, "BCM4324B5" }, /* 002.006.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2122, "BCM4343A0" }, /* 001.001.034 */
@ -334,6 +335,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
};
static const struct bcm_subver_table bcm_usb_subver_table[] = {
{ 0x2105, "BCM20703A1" }, /* 001.001.005 */
{ 0x210b, "BCM43142A0" }, /* 001.001.011 */
{ 0x2112, "BCM4314A0" }, /* 001.001.018 */
{ 0x2118, "BCM20702A0" }, /* 001.001.024 */

View File

@ -115,6 +115,7 @@ struct bcm_device {
u32 oper_speed;
int irq;
bool irq_active_low;
bool irq_acquired;
#ifdef CONFIG_PM
struct hci_uart *hu;
@ -288,6 +289,8 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
bdev->irq_acquired = true;
device_init_wakeup(bdev->dev, true);
pm_runtime_set_autosuspend_delay(bdev->dev,
@ -456,7 +459,7 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
pm_runtime_disable(bdev->dev);

View File

@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
readl(intel_private.gtt + pg);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++;
}
}
wmb();
readl(intel_private.gtt + j - 1);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void)
static void i9xx_chipset_flush(void)
{
wmb();
if (intel_private.i9xx_flush_page)
writel(1, intel_private.i9xx_flush_page);
}

View File

@ -18,11 +18,6 @@ module_param_named(trypci, si_trypci, bool, 0);
MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
" default scan of the interfaces identified via pci");
#define PCI_CLASS_SERIAL_IPMI 0x0c07
#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
#define PCI_DEVICE_ID_HP_MMC 0x121A
static void ipmi_pci_cleanup(struct si_sm_io *io)

View File

@ -126,7 +126,11 @@ static int clk_pm_runtime_get(struct clk_core *core)
return 0;
ret = pm_runtime_get_sync(core->dev);
return ret < 0 ? ret : 0;
if (ret < 0) {
pm_runtime_put_noidle(core->dev);
return ret;
}
return 0;
}
static void clk_pm_runtime_put(struct clk_core *core)
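
The rule behind this fix, sketched as a hypothetical in-kernel helper: pm_runtime_get_sync() increments the device's usage counter even when it fails, so every error path must rebalance the count with pm_runtime_put_noidle().

#include <linux/pm_runtime.h>

static int ex_runtime_get(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
                pm_runtime_put_noidle(dev); /* drop the count taken above */
                return ret;
        }
        return 0;
}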

View File

@ -225,7 +225,8 @@ static int apbt_next_event(unsigned long delta,
/**
* dw_apb_clockevent_init() - use an APB timer as a clock_event_device
*
* @cpu: The CPU the events will be targeted at.
* @cpu: The CPU the events will be targeted at or -1 if CPU affiliation
* isn't required.
* @name: The name used for the timer and the IRQ for it.
* @rating: The rating to give the timer.
* @base: I/O base for the timer registers.
@ -260,7 +261,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.max_delta_ticks = 0x7fffffff;
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.min_delta_ticks = 5000;
dw_ced->ced.cpumask = cpumask_of(cpu);
dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
dw_ced->ced.set_state_shutdown = apbt_shutdown;

View File

@ -146,10 +146,6 @@ static int num_called;
static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
case 0:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
@ -160,6 +156,8 @@ static int __init dw_apb_timer_init(struct device_node *timer)
#endif
break;
default:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
break;
}

View File

@ -467,7 +467,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
&kdev->kobj, "state%d", i);
if (ret) {
kfree(kobj);
kobject_put(&kobj->kobj);
goto error_state;
}
cpuidle_add_s2idle_attr_group(kobj);
@ -598,7 +598,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
&kdev->kobj, "driver");
if (ret) {
kfree(kdrv);
kobject_put(&kdrv->kobj);
return ret;
}
@ -692,7 +692,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (error) {
kfree(kdev);
kobject_put(&kdev->kobj);
return error;
}

View File

@ -183,7 +183,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
struct nitrox_device *nitrox_get_first_device(void)
{
struct nitrox_device *ndev = NULL;
struct nitrox_device *ndev;
mutex_lock(&devlist_lock);
list_for_each_entry(ndev, &ndevlist, list) {
@ -191,7 +191,7 @@ struct nitrox_device *nitrox_get_first_device(void)
break;
}
mutex_unlock(&devlist_lock);
if (!ndev)
if (&ndev->list == &ndevlist)
return NULL;
refcount_inc(&ndev->refcnt);
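
The subtlety this fixes, as a hedged in-kernel sketch (the ex_* types are hypothetical): when list_for_each_entry() completes without a break, the cursor is not NULL, it aliases the list head, so a NULL test can never signal "not found"; compare the cursor's list member against the head instead.

#include <linux/list.h>
#include <linux/types.h>

struct ex_dev {
        struct list_head list;
        bool ready;
};

static LIST_HEAD(ex_list);

static struct ex_dev *ex_find_ready(void)
{
        struct ex_dev *pos;

        list_for_each_entry(pos, &ex_list, list) {
                if (pos->ready)
                        break;
        }
        if (&pos->list == &ex_list) /* ran off the end: no match */
                return NULL;
        return pos;
}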

View File

@ -9,10 +9,9 @@ config CRYPTO_DEV_CCP_DD
config CRYPTO_DEV_SP_CCP
bool "Cryptographic Coprocessor device"
default y
depends on CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP_DD && DMADEVICES
select HW_RANDOM
select DMA_ENGINE
select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help

View File

@ -2764,7 +2764,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)

View File

@ -28,8 +28,10 @@
/* Registers values */
#define CRC_CR_RESET BIT(0)
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF
#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
#define CRC_CR_REV_IN_BYTE BIT(5)
#define CRC_CR_REV_OUT BIT(7)
#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
#define CRC_AUTOSUSPEND_DELAY 50
@ -38,8 +40,6 @@ struct stm32_crc {
struct device *dev;
void __iomem *regs;
struct clk *clk;
u8 pending_data[sizeof(u32)];
size_t nb_pending_bytes;
};
struct stm32_crc_list {
@ -59,14 +59,13 @@ struct stm32_crc_ctx {
struct stm32_crc_desc_ctx {
u32 partial; /* crc32c: partial in first 4 bytes of that struct */
struct stm32_crc *crc;
};
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
mctx->key = 0;
mctx->poly = CRC32_POLY_LE;
return 0;
}
@ -75,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
mctx->key = CRC32C_INIT_DEFAULT;
mctx->poly = CRC32C_POLY_LE;
return 0;
}
@ -94,32 +93,42 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
return 0;
}
static struct stm32_crc *stm32_crc_get_next_crc(void)
{
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
if (crc)
list_move_tail(&crc->list, &crc_list.dev_list);
spin_unlock_bh(&crc_list.lock);
return crc;
}
static int stm32_crc_init(struct shash_desc *desc)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
list_for_each_entry(crc, &crc_list.dev_list, list) {
ctx->crc = crc;
break;
}
spin_unlock_bh(&crc_list.lock);
crc = stm32_crc_get_next_crc();
if (!crc)
return -ENODEV;
pm_runtime_get_sync(ctx->crc->dev);
pm_runtime_get_sync(crc->dev);
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
/* Store partial result */
ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
pm_runtime_mark_last_busy(ctx->crc->dev);
pm_runtime_put_autosuspend(ctx->crc->dev);
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
return 0;
}
@ -128,31 +137,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
unsigned int length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc *crc = ctx->crc;
u32 *d32;
unsigned int i;
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
crc = stm32_crc_get_next_crc();
if (!crc)
return -ENODEV;
pm_runtime_get_sync(crc->dev);
if (unlikely(crc->nb_pending_bytes)) {
while (crc->nb_pending_bytes != sizeof(u32) && length) {
/* Fill in pending data */
crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
/*
* Restore previously calculated CRC for this context as init value
* Restore polynomial configuration
* Configure in register for word input data,
* Configure out register in reversed bit mode data.
*/
writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
/* Configure for byte data */
writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
writeb_relaxed(*d8++, crc->regs + CRC_DR);
length--;
}
if (crc->nb_pending_bytes == sizeof(u32)) {
/* Process completed pending data */
writel_relaxed(*(u32 *)crc->pending_data,
crc->regs + CRC_DR);
crc->nb_pending_bytes = 0;
}
/* Configure for word data */
writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
}
d32 = (u32 *)d8;
for (i = 0; i < length >> 2; i++)
/* Process 32 bits data */
writel_relaxed(*(d32++), crc->regs + CRC_DR);
for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
if (length) {
/* Configure for byte data */
writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
while (length--)
writeb_relaxed(*d8++, crc->regs + CRC_DR);
}
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
@ -160,22 +187,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
/* Check for pending data (non 32 bits) */
length &= 3;
if (likely(!length))
return 0;
if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
/* Shall not happen */
dev_err(crc->dev, "Pending data overflow\n");
return -EINVAL;
}
d8 = (const u8 *)d32;
for (i = 0; i < length; i++)
/* Store pending data */
crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
return 0;
}
@ -204,6 +215,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}
static unsigned int refcnt;
static DEFINE_MUTEX(refcnt_lock);
static struct shash_alg algs[] = {
/* CRC-32 */
{
@ -296,12 +309,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
if (ret) {
dev_err(dev, "Failed to register\n");
clk_disable_unprepare(crc->clk);
return ret;
mutex_lock(&refcnt_lock);
if (!refcnt) {
ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
if (ret) {
mutex_unlock(&refcnt_lock);
dev_err(dev, "Failed to register\n");
clk_disable_unprepare(crc->clk);
return ret;
}
}
refcnt++;
mutex_unlock(&refcnt_lock);
dev_info(dev, "Initialized\n");
@ -322,7 +341,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
mutex_lock(&refcnt_lock);
if (!--refcnt)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
mutex_unlock(&refcnt_lock);
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
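
The rewritten update path follows the classic head/body/tail split: unaligned leading bytes one at a time, then full 32-bit words, then the remainder. A standalone toy with the same shape (it merely sums bytes and words; it does not model the CRC unit's register protocol):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t ex_feed(const uint8_t *d8, size_t len)
{
        uint32_t acc = 0;
        uint32_t w;

        /* Head: bytes until the pointer is 32-bit aligned. */
        while (((uintptr_t)d8 & 3) && len) {
                acc += *d8++;
                len--;
        }
        /* Body: whole words, the fast path. */
        for (; len >= sizeof(w); d8 += sizeof(w), len -= sizeof(w)) {
                memcpy(&w, d8, sizeof(w));
                acc += w;
        }
        /* Tail: leftover bytes. */
        while (len--)
                acc += *d8++;
        return acc;
}

int main(void)
{
        static const uint8_t buf[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

        printf("%u\n", ex_feed(buf + 1, sizeof(buf) - 1));
        return 0;
}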

View File

@ -2670,7 +2670,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2704,6 +2703,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablkcipher_aes_setkey,
}
},

View File

@ -367,13 +367,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int err;
unsigned long flags;
struct scatterlist outhdr, iv_sg, status_sg, **sgs;
int i;
u64 dst_len;
unsigned int num_out = 0, num_in = 0;
int sg_total;
uint8_t *iv;
struct scatterlist *sg;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
pr_err("Invalid number of src SG.\n");
return src_nents;
}
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@ -419,6 +424,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
goto free;
}
dst_len = min_t(unsigned int, req->nbytes, dst_len);
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
req->nbytes, dst_len);
@ -459,12 +465,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
vc_sym_req->iv = iv;
/* Source data */
for (i = 0; i < src_nents; i++)
sgs[num_out++] = &req->src[i];
for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
sgs[num_out++] = sg;
/* Destination data */
for (i = 0; i < dst_nents; i++)
sgs[num_out + num_in++] = &req->dst[i];
for (sg = req->dst; sg; sg = sg_next(sg))
sgs[num_out + num_in++] = sg;
/* Status */
sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
@ -594,10 +600,11 @@ static void virtio_crypto_ablkcipher_finalize_req(
scatterwalk_map_and_copy(req->info, req->dst,
req->nbytes - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
req, err);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
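
The scatterlist change above encodes a general rule, sketched here with a hypothetical helper: a scatterlist can be chained, so entry i+1 need not live at &sg[i + 1]; walk it with sg_next() (or for_each_sg()) rather than by array indexing.

#include <linux/scatterlist.h>

static unsigned int ex_total_bytes(struct scatterlist *sgl)
{
        struct scatterlist *sg;
        unsigned int total = 0;

        for (sg = sgl; sg; sg = sg_next(sg)) /* follows chain links */
                total += sg->length;
        return total;
}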

View File

@ -972,7 +972,6 @@ static void pch_dma_remove(struct pci_dev *pdev)
}
/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026

View File

@ -586,8 +586,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
NULL, "%s", short_name);
kfree(short_name);
if (ret)
if (ret) {
kobject_put(&new_var->kobj);
return ret;
}
kobject_uevent(&new_var->kobj, KOBJ_ADD);
if (efivar_entry_add(new_var, &efivar_sysfs_list)) {

View File

@ -28,6 +28,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector) \
$(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS \
$(DISABLE_LTO)

View File

@ -292,14 +292,18 @@ static int sirf_probe(struct serdev_device *serdev)
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
if (IS_ERR(data->on_off))
if (IS_ERR(data->on_off)) {
ret = PTR_ERR(data->on_off);
goto err_put_device;
}
if (data->on_off) {
data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
GPIOD_IN);
if (IS_ERR(data->wakeup))
if (IS_ERR(data->wakeup)) {
ret = PTR_ERR(data->wakeup);
goto err_put_device;
}
/*
* Configurations where WAKEUP has been left not connected,

View File

@ -31,8 +31,6 @@
#define IOH_IRQ_BASE 0
#define PCI_VENDOR_ID_ROHM 0x10DB
struct ioh_reg_comn {
u32 ien;
u32 istatus;

View File

@ -524,7 +524,6 @@ static int pch_gpio_resume(struct pci_dev *pdev)
#define pch_gpio_resume NULL
#endif
#define PCI_VENDOR_ID_ROHM 0x10DB
static const struct pci_device_id pch_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },

View File

@ -20,13 +20,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
{
switch (fs) {
case 32000:
*n = 4096;
case 48000:
case 96000:
case 192000:
*n = fs * 128 / 1000;
break;
case 44100:
*n = 6272;
break;
case 48000:
*n = 6144;
case 88200:
case 176400:
*n = fs * 128 / 900;
break;
}
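
The recomputed N values follow the usual HDMI audio clock regeneration recommendation: N = 128 * fs / 1000 for the 48 kHz family and N = 128 * fs / 900 for the 44.1 kHz family, giving 32000 -> 4096, 48000 -> 6144, 96000 -> 12288, 192000 -> 24576, 44100 -> 6272, 88200 -> 12544, 176400 -> 25088. A standalone check:

#include <stdio.h>

int main(void)
{
        static const unsigned int fs1000[] = { 32000, 48000, 96000, 192000 };
        static const unsigned int fs900[] = { 44100, 88200, 176400 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("fs=%6u -> N=%u\n", fs1000[i], fs1000[i] * 128 / 1000);
        for (i = 0; i < 3; i++)
                printf("fs=%6u -> N=%u\n", fs900[i], fs900[i] * 128 / 900);
        return 0;
}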

View File

@ -62,11 +62,6 @@ int vkms_output_init(struct vkms_device *vkmsdev);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev);
/* Gem stuff */
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
struct drm_file *file,
u32 *handle,
u64 size);
int vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,

View File

@ -93,10 +93,10 @@ int vkms_gem_fault(struct vm_fault *vmf)
return ret;
}
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
struct drm_file *file,
u32 *handle,
u64 size)
static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
struct drm_file *file,
u32 *handle,
u64 size)
{
struct vkms_gem_object *obj;
int ret;
@ -109,7 +109,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->gem, handle);
drm_gem_object_put_unlocked(&obj->gem);
if (ret)
return ERR_PTR(ret);
@ -138,6 +137,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_obj->size;
args->pitch = pitch;
drm_gem_object_put_unlocked(gem_obj);
DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
return 0;

View File

@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>
@ -41,14 +42,6 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3
#endif
#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#endif
#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#endif
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000

View File

@ -177,7 +177,6 @@ static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
/* Definition for ML7213 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_I2C 0x802D
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
#define PCI_DEVICE_ID_ML7831_I2C 0x8817

View File

@ -360,6 +360,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
if (!list_empty(&ev_queue->event_list))
pollflags = EPOLLIN | EPOLLRDNORM;
else if (ev_queue->is_closed)
pollflags = EPOLLERR;
spin_unlock_irq(&ev_queue->lock);
return pollflags;

View File

@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN005b", /* P50 */
"LEN005e", /* T560 */
"LEN006c", /* T470s */
"LEN007a", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */

View File

@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg,
if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
BUG();
/* Write register: use repeated start */
/* Write register */
xfer[0].addr = client->addr;
xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
xfer[0].flags = client->flags & I2C_M_TEN;
xfer[0].len = 1;
xfer[0].buf = &buf;
/* Read data */
xfer[1].addr = client->addr;
xfer[1].flags = I2C_M_RD;
xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
xfer[1].len = len;
xfer[1].buf = val;
@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client,
const void *match_data;
int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_PROTOCOL_MANGLING)) {
dev_err(&client->dev,
"Need i2c bus that supports protocol mangling\n");
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "Not supported I2C adapter\n");
return -ENODEV;
}

View File

@ -52,10 +52,7 @@ static const struct w6692map w6692_map[] =
{W6692_USR, "USR W6692"}
};
#ifndef PCI_VENDOR_ID_USR
#define PCI_VENDOR_ID_USR 0x16ec
#define PCI_DEVICE_ID_USR_6692 0x3409
#endif
struct w6692_ch {
struct bchannel bch;

View File

@ -133,14 +133,6 @@ static int create_cpu_loop(int cpu)
s32 tmax;
int fmin;
/* Get PID params from the appropriate SAT */
hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
if (hdr == NULL) {
printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
return -EINVAL;
}
piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
/* Get FVT params to get Tmax; if not found, assume default */
hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL);
if (hdr) {
@ -153,6 +145,16 @@ static int create_cpu_loop(int cpu)
if (tmax < cpu_all_tmax)
cpu_all_tmax = tmax;
kfree(hdr);
/* Get PID params from the appropriate SAT */
hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
if (hdr == NULL) {
printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
return -EINVAL;
}
piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
/*
* Darwin has a minimum fan speed of 1000 rpm for the 4-way and
* 515 for the 2-way. That appears to be overkill, so for now,
@ -175,6 +177,9 @@ static int create_cpu_loop(int cpu)
pid.min = fmin;
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
kfree(hdr);
return 0;
}

View File

@ -775,7 +775,9 @@ static void bcache_device_free(struct bcache_device *d)
bcache_device_detach(d);
if (disk) {
if (disk->flags & GENHD_FL_UP)
bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
if (disk_added)
del_gendisk(disk);
if (disk->queue)
@ -783,7 +785,8 @@ static void bcache_device_free(struct bcache_device *d)
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
put_disk(disk);
if (disk_added)
put_disk(disk);
}
bioset_exit(&d->bio_split);

View File

@ -3087,7 +3087,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
max_t(unsigned short, limits->logical_block_size, cc->sector_size);
max_t(unsigned, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);

View File

@ -7438,7 +7438,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
flush_workqueue(md_misc_wq);
if (work_pending(&mddev->del_work))
flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}

View File

@ -1668,6 +1668,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
unsigned j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
dprintk(1, "unknown logical address type\n");
return -EINVAL;
}
if (type_mask & (1 << log_addrs->log_addr_type[i])) {
dprintk(1, "duplicate logical address type\n");
return -EINVAL;
@ -1688,10 +1692,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
dprintk(1, "invalid primary device type\n");
return -EINVAL;
}
if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
dprintk(1, "unknown logical address type\n");
return -EINVAL;
}
for (j = 0; j < feature_sz; j++) {
if ((features[j] & 0x80) == 0) {
if (op_is_dev_features)

View File

@ -2829,8 +2829,8 @@ static int ov5640_probe(struct i2c_client *client,
free_ctrls:
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
entity_cleanup:
mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
mutex_destroy(&sensor->lock);
return ret;
}
@ -2840,9 +2840,9 @@ static int ov5640_remove(struct i2c_client *client)
struct ov5640_dev *sensor = to_ov5640_dev(sd);
v4l2_async_unregister_subdev(&sensor->sd);
mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
mutex_destroy(&sensor->lock);
return 0;
}

View File

@ -8,6 +8,7 @@
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@ -21,6 +22,7 @@
struct rcar_fcp_device {
struct list_head list;
struct device *dev;
struct device_dma_parameters dma_parms;
};
static LIST_HEAD(fcp_devices);
@ -136,6 +138,9 @@ static int rcar_fcp_probe(struct platform_device *pdev)
fcp->dev = &pdev->dev;
fcp->dev->dma_parms = &fcp->dma_parms;
dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32));
pm_runtime_enable(&pdev->dev);
mutex_lock(&fcp_lock);

View File

@ -84,24 +84,23 @@ static int si2157_init(struct dvb_frontend *fe)
struct si2157_cmd cmd;
const struct firmware *fw;
const char *fw_name;
unsigned int uitmp, chip_id;
unsigned int chip_id, xtal_trim;
dev_dbg(&client->dev, "\n");
/* Returned IF frequency is garbage when firmware is not running */
memcpy(cmd.args, "\x15\x00\x06\x07", 4);
/* Try to get Xtal trim property, to verify tuner still running */
memcpy(cmd.args, "\x15\x00\x04\x02", 4);
cmd.wlen = 4;
cmd.rlen = 4;
ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
xtal_trim = cmd.args[2] | (cmd.args[3] << 8);
if (uitmp == dev->if_frequency / 1000)
if (ret == 0 && xtal_trim < 16)
goto warm;
dev->if_frequency = 0; /* we no longer know current tuner state */
/* power up */
if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);

View File

@ -84,7 +84,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap)
if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
err("tuner i2c write failed.");
ret = -EREMOTEIO;
return -EREMOTEIO;
}
if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl)
