Merge remote-tracking branch 'aosp/android-4.19-stable' into android12-base

* aosp/android-4.19-stable:
  ANDROID: revert some RNG function signature changes
  ANDROID: cpu/hotplug: avoid breaking Android ABI by fusing cpuhp steps
  UPSTREAM: lib/crypto: blake2s: avoid indirect calls to compression function for Clang CFI
  BACKPORT: lib/crypto: add prompts back to crypto libraries
  BACKPORT: lib/crypto: blake2s: include as built-in
  Linux 4.19.251
  net: usb: qmi_wwan: add Telit 0x1070 composition
  net: usb: qmi_wwan: add Telit 0x1060 composition
  xen/arm: Fix race in RB-tree based P2M accounting
  xen/blkfront: force data bouncing when backend is untrusted
  xen/netfront: force data bouncing when backend is untrusted
  xen/netfront: fix leaking data in shared pages
  xen/blkfront: fix leaking data in shared pages
  ipv6/sit: fix ipip6_tunnel_get_prl return value
  sit: use min
  net: dsa: bcm_sf2: force pause link settings
  hwmon: (ibmaem) don't call platform_device_del() if platform_device_add() fails
  xen/gntdev: Avoid blocking in unmap_grant_pages()
  net: tun: avoid disabling NAPI twice
  NFC: nxp-nci: Don't issue a zero length i2c_master_read()
  nfc: nfcmrvl: Fix irq_of_parse_and_map() return value
  net: bonding: fix use-after-free after 802.3ad slave unbind
  net: bonding: fix possible NULL deref in rlb code
  netfilter: nft_dynset: restore set element counter when failing to update
  caif_virtio: fix race between virtio_device_ready() and ndo_open()
  net: ipv6: unexport __init-annotated seg6_hmac_net_init()
  usbnet: fix memory allocation in helpers
  RDMA/qedr: Fix reporting QP timeout attribute
  net: tun: stop NAPI when detaching queues
  net: tun: unlink NAPI from device on destruction
  selftests/net: pass ipv6_args to udpgso_bench's IPv6 TCP test
  virtio-net: fix race between ndo_open() and virtio_device_ready()
  net: usb: ax88179_178a: Fix packet receiving
  net: rose: fix UAF bugs caused by timer handler
  SUNRPC: Fix READ_PLUS crasher
  s390/archrandom: simplify back to earlier design and initialize earlier
  dm raid: fix KASAN warning in raid5_add_disks
  dm raid: fix accesses beyond end of raid member array
  nvdimm: Fix badblocks clear off-by-one error
  UPSTREAM: crypto: poly1305 - fix poly1305_core_setkey() declaration
  UPSTREAM: mm: fix misplaced unlock_page in do_wp_page()
  BACKPORT: mm: do_wp_page() simplification
  UPSTREAM: mm/ksm: Remove reuse_ksm_page()
  UPSTREAM: mm: reuse only-pte-mapped KSM page in do_wp_page()
  Linux 4.19.250
  swiotlb: skip swiotlb_bounce when orig_addr is zero
  net/sched: move NULL ptr check to qdisc_put() too
  net: mscc: ocelot: allow unregistered IP multicast flooding
  kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
  fdt: Update CRC check for rng-seed
  xen: unexport __init-annotated xen_xlate_map_ballooned_pages()
  drm: remove drm_fb_helper_modinit
  powerpc/pseries: wire up rng during setup_arch()
  kbuild: link vmlinux only once for CONFIG_TRIM_UNUSED_KSYMS (2nd attempt)
  modpost: fix section mismatch check for exported init/exit sections
  ARM: cns3xxx: Fix refcount leak in cns3xxx_init
  ARM: Fix refcount leak in axxia_boot_secondary
  soc: bcm: brcmstb: pm: pm-arm: Fix refcount leak in brcmstb_pm_probe
  ARM: exynos: Fix refcount leak in exynos_map_pmu
  ARM: dts: imx6qdl: correct PU regulator ramp delay
  powerpc/powernv: wire up rng during setup_arch
  powerpc/rtas: Allow ibm,platform-dump RTAS call with null buffer address
  powerpc: Enable execve syscall exit tracepoint
  xtensa: Fix refcount leak bug in time.c
  xtensa: xtfpga: Fix refcount leak bug in setup
  iio: adc: axp288: Override TS pin bias current for some models
  iio: trigger: sysfs: fix use-after-free on remove
  iio: gyro: mpu3050: Fix the error handling in mpu3050_power_up()
  iio: accel: mma8452: ignore the return value of reset operation
  iio:accel:bma180: rearrange iio trigger get and register
  iio:chemical:ccs811: rearrange iio trigger get and register
  usb: chipidea: udc: check request status before setting device address
  xhci: turn off port power in shutdown
  iio: adc: vf610: fix conversion mode sysfs node name
  gpio: winbond: Fix error code in winbond_gpio_get()
  virtio_net: fix xdp_rxq_info bug after suspend/resume
  igb: Make DMA faster when CPU is active on the PCIe link
  afs: Fix dynamic root getattr
  MIPS: Remove repetitive increase irq_err_count
  x86/xen: Remove undefined behavior in setup_features()
  erspan: do not assume transport header is always set
  net/sched: sch_netem: Fix arithmetic in netem_dump() for 32-bit platforms
  bonding: ARP monitor spams NETDEV_NOTIFY_PEERS notifiers
  USB: serial: option: add Quectel RM500K module support
  USB: serial: option: add Quectel EM05-G modem
  USB: serial: option: add Telit LE910Cx 0x1250 composition
  random: quiet urandom warning ratelimit suppression message
  dm era: commit metadata in postsuspend after worker stops
  ata: libata: add qc->flags in ata_qc_complete_template tracepoint
  ALSA: hda/realtek: Add quirk for Clevo PD70PNT
  ALSA: hda/conexant: Fix missing beep setup
  ALSA: hda/via: Fix missing beep setup
  random: schedule mix_interrupt_randomness() less often
  vt: drop old FONT ioctls
  Linux 4.19.249
  Revert "hwmon: Make chip parameter for with_info API mandatory"
  tcp: drop the hash_32() part from the index calculation
  tcp: increase source port perturb table to 2^16
  tcp: dynamically allocate the perturb table used by source ports
  tcp: add small random increments to the source port
  tcp: use different parts of the port_offset for index and offset
  tcp: add some entropy in __inet_hash_connect()
  xprtrdma: fix incorrect header size calculations
  usb: gadget: u_ether: fix regression in setting fixed MAC address
  s390/mm: use non-quiescing sske for KVM switch to keyed guest
  powerpc/mm: Switch obsolete dssall to .long
  RISC-V: fix barrier() use in <vdso/processor.h>
  net: openvswitch: fix leak of nested actions
  net: openvswitch: fix misuse of the cached connection on tuple changes
  virtio-pci: Remove wrong address verification in vp_del_vqs()
  ext4: add reserved GDT blocks check
  ext4: make variable "count" signed
  ext4: fix bug_on ext4_mb_use_inode_pa
  serial: 8250: Store to lsr_save_flags after lsr read
  usb: gadget: lpc32xx_udc: Fix refcount leak in lpc32xx_udc_probe
  usb: dwc2: Fix memory leak in dwc2_hcd_init
  USB: serial: io_ti: add Agilent E5805A support
  USB: serial: option: add support for Cinterion MV31 with new baseline
  comedi: vmk80xx: fix expression for tx buffer size
  irqchip/gic-v3: Fix refcount leak in gic_populate_ppi_partitions
  irqchip/gic/realview: Fix refcount leak in realview_gic_of_init
  faddr2line: Fix overlapping text section failures, the sequel
  certs/blacklist_hashes.c: fix const confusion in certs blacklist
  arm64: ftrace: fix branch range checks
  net: bgmac: Fix an erroneous kfree() in bgmac_remove()
  mlxsw: spectrum_cnt: Reorder counter pools
  misc: atmel-ssc: Fix IRQ check in ssc_probe
  tty: goldfish: Fix free_irq() on remove
  i40e: Fix call trace in setup_tx_descriptors
  i40e: Fix adding ADQ filter to TC0
  pNFS: Don't keep retrying if the server replied NFS4ERR_LAYOUTUNAVAILABLE
  random: credit cpu and bootloader seeds by default
  net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface netdev[napi]_alloc_frag
  ipv6: Fix signed integer overflow in l2tp_ip6_sendmsg
  nfc: nfcmrvl: Fix memory leak in nfcmrvl_play_deferred
  virtio-mmio: fix missing put_device() when vm_cmdline_parent registration failed
  scsi: pmcraid: Fix missing resource cleanup in error case
  scsi: ipr: Fix missing/incorrect resource cleanup in error case
  scsi: lpfc: Fix port stuck in bypassed state after LIP in PT2PT topology
  scsi: vmw_pvscsi: Expand vcpuHint to 16 bits
  ASoC: wm_adsp: Fix event generation for wm_adsp_fw_put()
  ASoC: es8328: Fix event generation for deemphasis control
  ASoC: wm8962: Fix suspend while playing music
  ata: libata-core: fix NULL pointer deref in ata_host_alloc_pinfo()
  ASoC: cs42l56: Correct typo in minimum level for SX volume controls
  ASoC: cs42l52: Correct TLV for Bypass Volume
  ASoC: cs53l30: Correct number of volume levels on SX controls
  ASoC: cs42l52: Fix TLV scales for mixer controls
  powerpc/kasan: Silence KASAN warnings in __get_wchan()
  random: account for arch randomness in bits
  random: mark bootloader randomness code as __init
  random: avoid checking crng_ready() twice in random_init()
  crypto: drbg - make reseeding from get_random_bytes() synchronous
  crypto: drbg - always try to free Jitter RNG instance
  crypto: drbg - move dynamic ->reseed_threshold adjustments to __drbg_seed()
  crypto: drbg - track whether DRBG was seeded with !rng_is_initialized()
  crypto: drbg - prepare for more fine-grained tracking of seeding state
  crypto: drbg - always seeded with SP800-90B compliant noise source
  crypto: drbg - add FIPS 140-2 CTRNG for noise source
  Revert "random: use static branch for crng_ready()"
  random: check for signals after page of pool writes
  random: wire up fops->splice_{read,write}_iter()
  random: convert to using fops->write_iter()
  random: move randomize_page() into mm where it belongs
  random: move initialization functions out of hot pages
  random: use proper return types on get_random_{int,long}_wait()
  random: remove extern from functions in header
  random: use static branch for crng_ready()
  random: credit architectural init the exact amount
  random: handle latent entropy and command line from random_init()
  random: use proper jiffies comparison macro
  random: remove ratelimiting for in-kernel unseeded randomness
  random: avoid initializing twice in credit race
  random: use symbolic constants for crng_init states
  siphash: use one source of truth for siphash permutations
  random: help compiler out with fast_mix() by using simpler arguments
  random: do not use input pool from hard IRQs
  random: order timer entropy functions below interrupt functions
  random: do not pretend to handle premature next security model
  random: do not use batches when !crng_ready()
  random: insist on random_get_entropy() existing in order to simplify
  xtensa: use fallback for random_get_entropy() instead of zero
  sparc: use fallback for random_get_entropy() instead of zero
  um: use fallback for random_get_entropy() instead of zero
  x86/tsc: Use fallback for random_get_entropy() instead of zero
  nios2: use fallback for random_get_entropy() instead of zero
  arm: use fallback for random_get_entropy() instead of zero
  mips: use fallback for random_get_entropy() instead of just c0 random
  m68k: use fallback for random_get_entropy() instead of zero
  timekeeping: Add raw clock fallback for random_get_entropy()
  powerpc: define get_cycles macro for arch-override
  alpha: define get_cycles macro for arch-override
  parisc: define get_cycles macro for arch-override
  s390: define get_cycles macro for arch-override
  ia64: define get_cycles macro for arch-override
  init: call time_init() before rand_initialize()
  random: fix sysctl documentation nits
  random: document crng_fast_key_erasure() destination possibility
  random: make random_get_entropy() return an unsigned long
  random: check for signals every PAGE_SIZE chunk of /dev/[u]random
  random: check for signal_pending() outside of need_resched() check
  random: do not allow user to keep crng key around on stack
  random: do not split fast init input in add_hwgenerator_randomness()
  random: mix build-time latent entropy into pool at init
  random: re-add removed comment about get_random_{u32,u64} reseeding
  random: treat bootloader trust toggle the same way as cpu trust toggle
  random: skip fast_init if hwrng provides large chunk of entropy
  random: check for signal and try earlier when generating entropy
  random: reseed more often immediately after booting
  random: make consistent usage of crng_ready()
  random: use SipHash as interrupt entropy accumulator
  random: replace custom notifier chain with standard one
  random: don't let 644 read-only sysctls be written to
  random: give sysctl_random_min_urandom_seed a more sensible value
  random: do crng pre-init loading in worker rather than irq
  random: unify cycles_t and jiffies usage and types
  random: cleanup UUID handling
  random: only wake up writers after zap if threshold was passed
  random: round-robin registers as ulong, not u32
  random: clear fast pool, crng, and batches in cpuhp bring up
  random: pull add_hwgenerator_randomness() declaration into random.h
  random: check for crng_init == 0 in add_device_randomness()
  random: unify early init crng load accounting
  random: do not take pool spinlock at boot
  random: defer fast pool mixing to worker
  random: rewrite header introductory comment
  random: group sysctl functions
  random: group userspace read/write functions
  random: group entropy collection functions
  random: group entropy extraction functions
  random: group initialization wait functions
  random: remove whitespace and reorder includes
  random: remove useless header comment
  random: introduce drain_entropy() helper to declutter crng_reseed()
  random: deobfuscate irq u32/u64 contributions
  random: add proper SPDX header
  random: remove unused tracepoints
  random: remove ifdef'd out interrupt bench
  random: tie batched entropy generation to base_crng generation
  random: zero buffer after reading entropy from userspace
  random: remove outdated INT_MAX >> 6 check in urandom_read()
  random: use hash function for crng_slow_load()
  random: absorb fast pool into input pool after fast load
  random: do not xor RDRAND when writing into /dev/random
  random: ensure early RDSEED goes through mixer on init
  random: inline leaves of rand_initialize()
  random: use RDSEED instead of RDRAND in entropy extraction
  random: fix locking in crng_fast_load()
  random: remove batched entropy locking
  random: remove use_input_pool parameter from crng_reseed()
  random: make credit_entropy_bits() always safe
  random: always wake up entropy writers after extraction
  random: use linear min-entropy accumulation crediting
  random: simplify entropy debiting
  random: use computational hash for entropy extraction
  random: only call crng_finalize_init() for primary_crng
  random: access primary_pool directly rather than through pointer
  random: continually use hwgenerator randomness
  random: simplify arithmetic function flow in account()
  random: access input_pool_data directly rather than through pointer
  random: cleanup fractional entropy shift constants
  random: prepend remaining pool constants with POOL_
  random: de-duplicate INPUT_POOL constants
  random: remove unused OUTPUT_POOL constants
  random: rather than entropy_store abstraction, use global
  random: remove unused extract_entropy() reserved argument
  random: remove incomplete last_data logic
  random: cleanup integer types
  random: cleanup poolinfo abstraction
  random: fix typo in comments
  random: don't reset crng_init_cnt on urandom_read()
  random: avoid superfluous call to RDRAND in CRNG extraction
  random: early initialization of ChaCha constants
  random: initialize ChaCha20 constants with correct endianness
  random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs
  random: harmonize "crng init done" messages
  random: mix bootloader randomness into pool
  random: do not re-init if crng_reseed completes before primary init
  random: do not sign extend bytes for rotation when mixing
  random: use BLAKE2s instead of SHA1 in extraction
  random: remove unused irq_flags argument from add_interrupt_randomness()
  random: document add_hwgenerator_randomness() with other input functions
  crypto: blake2s - adjust include guard naming
  crypto: blake2s - include <linux/bug.h> instead of <asm/bug.h>
  MAINTAINERS: co-maintain random.c
  random: remove dead code left over from blocking pool
  random: avoid arch_get_random_seed_long() when collecting IRQ randomness
  random: add arch_get_random_*long_early()
  powerpc: Use bool in archrandom.h
  linux/random.h: Mark CONFIG_ARCH_RANDOM functions __must_check
  linux/random.h: Use false with bool
  linux/random.h: Remove arch_has_random, arch_has_random_seed
  s390: Remove arch_has_random, arch_has_random_seed
  powerpc: Remove arch_has_random, arch_has_random_seed
  x86: Remove arch_has_random, arch_has_random_seed
  random: avoid warnings for !CONFIG_NUMA builds
  random: split primary/secondary crng init paths
  random: remove some dead code of poolinfo
  random: fix typo in add_timer_randomness()
  random: Add and use pr_fmt()
  random: convert to ENTROPY_BITS for better code readability
  random: remove unnecessary unlikely()
  random: remove kernel.random.read_wakeup_threshold
  random: delete code to pull data into pools
  random: remove the blocking pool
  random: fix crash on multiple early calls to add_bootloader_randomness()
  char/random: silence a lockdep splat with printk()
  random: make /dev/random be almost like /dev/urandom
  random: ignore GRND_RANDOM in getentropy(2)
  random: add GRND_INSECURE to return best-effort non-cryptographic bytes
  random: Add a urandom_read_nowait() for random APIs that don't warn
  random: Don't wake crng_init_wait when crng_init == 1
  lib/crypto: sha1: re-roll loops to reduce code size
  lib/crypto: blake2s: move hmac construction into wireguard
  crypto: blake2s - generic C library implementation and selftest
  Revert "hwrng: core - Freeze khwrng thread during suspend"
  char/random: Add a newline at the end of the file
  random: Use wait_event_freezable() in add_hwgenerator_randomness()
  fdt: add support for rng-seed
  random: Support freezable kthreads in add_hwgenerator_randomness()
  random: fix soft lockup when trying to read from an uninitialized blocking pool
  latent_entropy: avoid build error when plugin cflags are not set
  random: document get_random_int() family
  random: move rand_initialize() earlier
  random: only read from /dev/random after its pool has received 128 bits
  drivers/char/random.c: make primary_crng static
  drivers/char/random.c: remove unused stuct poolinfo::poolbits
  drivers/char/random.c: constify poolinfo_table
  9p: missing chunk of "fs/9p: Don't update file type when updating file attributes"
  Linux 4.19.248
  x86/speculation/mmio: Print SMT warning
  KVM: x86/speculation: Disable Fill buffer clear within guests
  x86/speculation/mmio: Reuse SRBDS mitigation for SBDS
  x86/speculation/srbds: Update SRBDS mitigation selection
  x86/speculation/mmio: Add sysfs reporting for Processor MMIO Stale Data
  x86/speculation/mmio: Enable CPU Fill buffer clearing on idle
  x86/bugs: Group MDS, TAA & Processor MMIO Stale Data mitigations
  x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data
  x86/speculation: Add a common function for MD_CLEAR mitigation update
  x86/speculation/mmio: Enumerate Processor MMIO Stale Data bug
  Documentation: Add documentation for Processor MMIO Stale Data
  x86/cpu: Add another Alder Lake CPU to the Intel family
  x86/cpu: Add Lakefield, Alder Lake and Rocket Lake models to the to Intel CPU family
  x86/cpu: Add Jasper Lake to Intel family
  cpu/speculation: Add prototype for cpu_show_srbds()
  x86/cpu: Add Elkhart Lake to Intel family

Change-Id: I95c9388d6966feac7ba29518774efd9ca56edd3e
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>

Conflicts:
	drivers/char/Kconfig
	mm/memory.c
commit 9df7ef7315
UtsavBalar1231 <utsavbalar1231@gmail.com>, 2022-07-09 10:34:17 +05:30
228 changed files with 3639 additions and 3739 deletions


@@ -1,4 +1,4 @@
-What: /sys/bus/iio/devices/iio:deviceX/conversion_mode
+What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode
KernelVersion: 4.2
Contact: linux-iio@vger.kernel.org
Description:


@@ -481,6 +481,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities


@@ -15,3 +15,4 @@ are configurable at compile, boot or run time.
tsx_async_abort
multihit.rst
special-register-buffer-data-sampling.rst
processor_mmio_stale_data.rst


@@ -0,0 +1,246 @@
=========================================
Processor MMIO Stale Data Vulnerabilities
=========================================
Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O
(MMIO) vulnerabilities that can expose data. The sequences of operations for
exposing data range from simple to very complex. Because most of the
vulnerabilities require the attacker to have access to MMIO, many environments
are not affected. System environments using virtualization where MMIO access is
provided to untrusted guests may need mitigation. These vulnerabilities are
not transient execution attacks. However, these vulnerabilities may propagate
stale data into core fill buffers where the data can subsequently be inferred
by an unmitigated transient execution attack. Mitigation for these
vulnerabilities includes a combination of microcode update and software
changes, depending on the platform and usage model. Some of these mitigations
are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or
those used to mitigate Special Register Buffer Data Sampling (SRBDS).
Data Propagators
================
Propagators are operations that result in stale data being copied or moved from
one microarchitectural buffer or register to another. Processor MMIO Stale Data
Vulnerabilities are operations that may result in stale data being directly
read into an architectural, software-visible state or sampled from a buffer or
register.
Fill Buffer Stale Data Propagator (FBSDP)
-----------------------------------------
Stale data may propagate from fill buffers (FB) into the non-coherent portion
of the uncore on some non-coherent writes. Fill buffer propagation by itself
does not make stale data architecturally visible. Stale data must be propagated
to a location where it is subject to reading or sampling.
Sideband Stale Data Propagator (SSDP)
-------------------------------------
The sideband stale data propagator (SSDP) is limited to the client (including
Intel Xeon server E3) uncore implementation. The sideband response buffer is
shared by all client cores. For non-coherent reads that go to sideband
destinations, the uncore logic returns 64 bytes of data to the core, including
both requested data and unrequested stale data, from a transaction buffer and
the sideband response buffer. As a result, stale data from the sideband
response and transaction buffers may now reside in a core fill buffer.
Primary Stale Data Propagator (PSDP)
------------------------------------
The primary stale data propagator (PSDP) is limited to the client (including
Intel Xeon server E3) uncore implementation. Similar to the sideband response
buffer, the primary response buffer is shared by all client cores. For some
processors, MMIO primary reads will return 64 bytes of data to the core fill
buffer including both requested data and unrequested stale data. This is
similar to the sideband stale data propagator.
Vulnerabilities
===============
Device Register Partial Write (DRPW) (CVE-2022-21166)
-----------------------------------------------------
Some endpoint MMIO registers incorrectly handle writes that are smaller than
the register size. Instead of aborting the write or only copying the correct
subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than
specified by the write transaction may be written to the register. On
processors affected by FBSDP, this may expose stale data from the fill buffers
of the core that created the write transaction.
Shared Buffers Data Sampling (SBDS) (CVE-2022-21125)
----------------------------------------------------
After propagators may have moved data around the uncore and copied stale data
into client core fill buffers, processors affected by MFBDS can leak data from
the fill buffer. It is limited to the client (including Intel Xeon server E3)
uncore implementation.
Shared Buffers Data Read (SBDR) (CVE-2022-21123)
------------------------------------------------
It is similar to Shared Buffer Data Sampling (SBDS) except that the data is
directly read into the architectural software-visible state. It is limited to
the client (including Intel Xeon server E3) uncore implementation.
Affected Processors
===================
Not all the CPUs are affected by all the variants. For instance, most
processors for the server market (excluding Intel Xeon E3 processors) are
impacted by only Device Register Partial Write (DRPW).
Below is the list of affected Intel processors [#f1]_:
=================== ============ =========
Common name Family_Model Steppings
=================== ============ =========
HASWELL_X 06_3FH 2,4
SKYLAKE_L 06_4EH 3
BROADWELL_X 06_4FH All
SKYLAKE_X 06_55H 3,4,6,7,11
BROADWELL_D 06_56H 3,4,5
SKYLAKE 06_5EH 3
ICELAKE_X 06_6AH 4,5,6
ICELAKE_D 06_6CH 1
ICELAKE_L 06_7EH 5
ATOM_TREMONT_D 06_86H All
LAKEFIELD 06_8AH 1
KABYLAKE_L 06_8EH 9 to 12
ATOM_TREMONT 06_96H 1
ATOM_TREMONT_L 06_9CH 0
KABYLAKE 06_9EH 9 to 13
COMETLAKE 06_A5H 2,3,5
COMETLAKE_L 06_A6H 0,1
ROCKETLAKE 06_A7H 1
=================== ============ =========
If a CPU is in the affected processor list, but not affected by a variant, it
is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later
section, mitigation largely remains the same for all the variants, i.e. to
clear the CPU fill buffers via VERW instruction.
New bits in MSRs
================
Newer processors and microcode update on existing affected processors added new
bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate
specific variants of Processor MMIO Stale Data vulnerabilities and mitigation
capability.
MSR IA32_ARCH_CAPABILITIES
--------------------------
Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the
Shared Buffers Data Read (SBDR) vulnerability or the sideband stale
data propagator (SSDP).
Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer
Stale Data Propagator (FBSDP).
Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data
Propagator (PSDP).
Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer
values as part of MD_CLEAR operations. Processors that do not
enumerate MDS_NO (meaning they are affected by MDS) but that do
enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate
FB_CLEAR as part of their MD_CLEAR support.
Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR
IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS
bit can be set to cause the VERW instruction to not perform the
FB_CLEAR action. Not all processors that support FB_CLEAR will support
FB_CLEAR_CTRL.
MSR IA32_MCU_OPT_CTRL
---------------------
Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR
action. This may be useful to reduce the performance impact of FB_CLEAR in
cases where system software deems it warranted (for example, when performance
is more critical, or the untrusted software has no MMIO access). Note that
FB_CLEAR_DIS has no impact on enumeration (for example, it does not change
FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors
that enumerate FB_CLEAR.
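[Editor's aside, not part of the patch: the kernel mirrors these bits as ARCH_CAP_* constants in arch/x86/include/asm/msr-index.h, and a check over the three *_NO bits looks roughly like the sketch below. The helper name is illustrative.]

    #include <linux/types.h>

    #define ARCH_CAP_SBDR_SSDP_NO   (1ULL << 13)
    #define ARCH_CAP_FBSDP_NO       (1ULL << 14)
    #define ARCH_CAP_PSDP_NO        (1ULL << 15)
    #define ARCH_CAP_FB_CLEAR       (1ULL << 17)

    /* Illustrative: true when no stale-data propagator is present,
     * i.e. all three *_NO bits from the text are set. */
    static inline bool cpu_mmio_stale_data_no(u64 ia32_cap)
    {
            return (ia32_cap & ARCH_CAP_SBDR_SSDP_NO) &&
                   (ia32_cap & ARCH_CAP_FBSDP_NO) &&
                   (ia32_cap & ARCH_CAP_PSDP_NO);
    }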
Mitigation
==========
Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the
same mitigation strategy to force the CPU to clear the affected buffers before
an attacker can extract the secrets.
This is achieved by using the otherwise unused and obsolete VERW instruction in
combination with a microcode update. The microcode clears the affected CPU
buffers when the VERW instruction is executed.
Kernel reuses the MDS function to invoke the buffer clearing:
mds_clear_cpu_buffers()
On MDS affected CPUs, the kernel already invokes CPU buffer clear on
kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
additional mitigation is needed on such CPUs.
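[Editor's aside, not part of the patch: in 4.19 the helper named above lives in arch/x86/include/asm/nospec-branch.h and is essentially a single VERW with a memory operand:]

    static inline void mds_clear_cpu_buffers(void)
    {
            static const u16 ds = __KERNEL_DS;

            /*
             * The memory-operand form of VERW is the one the microcode
             * update documents as clearing the CPU buffers; "cc" is
             * clobbered because VERW writes ZF.
             */
            asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }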
For CPUs not affected by MDS or TAA, mitigation is needed only for the attacker
with MMIO capability. Therefore, VERW is not required for kernel/userspace. For
virtualization case, VERW is only needed at VMENTER for a guest with MMIO
capability.
Mitigation points
-----------------
Return to user space
^^^^^^^^^^^^^^^^^^^^
Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation
needed.
C-State transition
^^^^^^^^^^^^^^^^^^
Control register writes by CPU during C-state transition can propagate data
from fill buffer to uncore buffers. Execute VERW before C-state transition to
clear CPU fill buffers.
Guest entry point
^^^^^^^^^^^^^^^^^
Same mitigation as MDS when processor is also affected by MDS/TAA, otherwise
execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by
MDS/TAA, guest without MMIO access cannot extract secrets using Processor MMIO
Stale Data vulnerabilities, so there is no need to execute VERW for such guests.
Mitigation control on the kernel command line
---------------------------------------------
The kernel command line allows to control the Processor MMIO Stale Data
mitigations at boot time with the option "mmio_stale_data=". The valid
arguments for this option are:
========== =================================================================
full If the CPU is vulnerable, enable mitigation; CPU buffer clearing
on exit to userspace and when entering a VM. Idle transitions are
protected as well. It does not automatically disable SMT.
full,nosmt Same as full, with SMT disabled on vulnerable CPUs. This is the
complete mitigation.
off Disables mitigation completely.
========== =================================================================
If the CPU is affected and mmio_stale_data=off is not supplied on the kernel
command line, then the kernel selects the appropriate mitigation.
Mitigation status information
-----------------------------
The Linux kernel provides a sysfs interface to enumerate the current
vulnerability status of the system: whether the system is vulnerable, and
which mitigations are active. The relevant sysfs file is:
/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
The possible values in this file are:
.. list-table::
* - 'Not affected'
- The processor is not vulnerable
* - 'Vulnerable'
- The processor is vulnerable, but no mitigation enabled
* - 'Vulnerable: Clear CPU buffers attempted, no microcode'
- The processor is vulnerable, but microcode is not updated. The
mitigation is enabled on a best effort basis.
* - 'Mitigation: Clear CPU buffers'
- The processor is vulnerable and the CPU buffer clearing mitigation is
enabled.
If the processor is vulnerable then the following information is appended to
the above information:
======================== ===========================================
'SMT vulnerable' SMT is enabled
'SMT disabled' SMT is disabled
'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown
======================== ===========================================
References
----------
.. [#f1] Affected Processors
https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html


@@ -2599,6 +2599,7 @@
kvm.nx_huge_pages=off [X86]
no_entry_flush [PPC]
no_uaccess_flush [PPC]
mmio_stale_data=off [X86]
Exceptions:
This does not have any effect on
@@ -2620,6 +2621,7 @@
Equivalent to: l1tf=flush,nosmt [X86]
mds=full,nosmt [X86]
tsx_async_abort=full,nosmt [X86]
mmio_stale_data=full,nosmt [X86]
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -2629,6 +2631,40 @@
log everything. Information is printed at KERN_DEBUG
so loglevel=8 may also need to be specified.
mmio_stale_data=
[X86,INTEL] Control mitigation for the Processor
MMIO Stale Data vulnerabilities.
Processor MMIO Stale Data is a class of
vulnerabilities that may expose data after an MMIO
operation. Exposed data could originate or end in
the same CPU buffers as affected by MDS and TAA.
Therefore, similar to MDS and TAA, the mitigation
is to clear the affected CPU buffers.
This parameter controls the mitigation. The
options are:
full - Enable mitigation on vulnerable CPUs
full,nosmt - Enable mitigation and disable SMT on
vulnerable CPUs.
off - Unconditionally disable mitigation
On MDS or TAA affected machines,
mmio_stale_data=off can be prevented by an active
MDS or TAA mitigation as these vulnerabilities are
mitigated with the same mechanism so in order to
disable this mitigation, you need to specify
mds=off and tsx_async_abort=off too.
Not specifying this option is equivalent to
mmio_stale_data=full.
For details see:
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
module.sig_enforce
[KNL] When CONFIG_MODULE_SIG is set, this means that
modules without (valid) signatures will fail to load.
@@ -3696,6 +3732,12 @@
fully seed the kernel's CRNG. Default is controlled
by CONFIG_RANDOM_TRUST_CPU.
random.trust_bootloader={on,off}
[KNL] Enable or disable trusting the use of a
seed passed by the bootloader (if available) to
fully seed the kernel's CRNG. Default is controlled
by CONFIG_RANDOM_TRUST_BOOTLOADER.
ras=option[,option,...] [KNL] RAS-specific options
cec_disable [X86]


@@ -848,9 +848,40 @@ The kernel command line parameter printk.devkmsg= overrides this and is
a one-time setting until next reboot: once set, it cannot be changed by
this sysctl interface anymore.
==============================================================
+pty
+===
-randomize_va_space:
+See Documentation/filesystems/devpts.rst.
random
======
This is a directory, with the following entries:
* ``boot_id``: a UUID generated the first time this is retrieved, and
unvarying after that;
* ``uuid``: a UUID generated every time this is retrieved (this can
thus be used to generate UUIDs at will);
* ``entropy_avail``: the pool's entropy count, in bits;
* ``poolsize``: the entropy pool size, in bits;
* ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
number of seconds between urandom pool reseeding). This file is
writable for compatibility purposes, but writing to it has no effect
on any RNG behavior;
* ``write_wakeup_threshold``: when the entropy count drops below this
(as a number of bits), processes waiting to write to ``/dev/random``
are woken up. This file is writable for compatibility purposes, but
writing to it has no effect on any RNG behavior.
randomize_va_space
==================
This option can be used to select the type of process address
space randomization that is used in the system, for architectures
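[Editor's aside on the random entries documented above: they are plain text files under /proc/sys/kernel/random, so a minimal userspace check needs no special API. Illustrative program, not part of the patch:]

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/random/entropy_avail", "r");
            int bits;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &bits) == 1)
                    printf("entropy_avail: %d bits\n", bits);
            fclose(f);
            return 0;
    }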


@@ -12280,6 +12280,7 @@ F: arch/mips/configs/generic/board-ranchu.config
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: drivers/char/random.c


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 247
+SUBLEVEL = 251
EXTRAVERSION =
NAME = "People's Front"
@@ -1134,7 +1134,7 @@ PHONY += autoksyms_recursive
autoksyms_recursive: $(vmlinux-deps)
ifdef CONFIG_TRIM_UNUSED_KSYMS
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
"$(MAKE) -f $(srctree)/Makefile vmlinux"
"$(MAKE) -f $(srctree)/Makefile autoksyms_recursive"
endif
# For the kernel to actually contain only the needed exported symbols,


@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
return ret;
}
#define get_cycles get_cycles
#endif
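[Editor's note: the bare #define get_cycles get_cycles added here recurs in the ia64, parisc and powerpc hunks below. It is the kernel's usual way to announce an arch override, because the generic fallback is guarded by #ifndef. Schematically, with an illustrative counter stub:]

    static inline unsigned long read_hw_counter(void)
    {
            return 12345;   /* stand-in for the real hardware counter */
    }

    /* arch/<arch>/include/asm/timex.h (schematic) */
    static inline unsigned long get_cycles(void)
    {
            return read_hw_counter();
    }
    #define get_cycles get_cycles   /* announce that the arch provides one */

    /* generic timex side, simplified: fallback only when absent */
    #ifndef get_cycles
    static inline unsigned long get_cycles(void)
    {
            return 0;
    }
    #endif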


@@ -753,7 +753,7 @@
regulator-name = "vddpu";
regulator-min-microvolt = <725000>;
regulator-max-microvolt = <1450000>;
-regulator-enable-ramp-delay = <150>;
+regulator-enable-ramp-delay = <380>;
anatop-reg-offset = <0x140>;
anatop-vol-bit-shift = <9>;
anatop-vol-bit-width = <5>;


@@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o
+obj-$(if $(CONFIG_CRYPTO_BLAKE2S_ARM),y) += libblake2s-arm.o
obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
@@ -51,7 +52,8 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
-blake2s-arm-y := blake2s-core.o blake2s-glue.o
+blake2s-arm-y := blake2s-shash.o
+libblake2s-arm-y:= blake2s-core.o blake2s-glue.o
blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o


@@ -167,7 +167,7 @@
.endm
//
-// void blake2s_compress_arch(struct blake2s_state *state,
+// void blake2s_compress(struct blake2s_state *state,
// const u8 *block, size_t nblocks, u32 inc);
//
// Only the first three fields of struct blake2s_state are used:
@@ -176,7 +176,7 @@
// u32 f[2]; (in)
//
.align 5
-ENTRY(blake2s_compress_arch)
+ENTRY(blake2s_compress)
push {r0-r2,r4-r11,lr} // keep this an even number
.Lnext_block:
@@ -303,4 +303,4 @@ ENTRY(blake2s_compress_arch)
str r3, [r12], #4
bne 1b
b .Lcopy_block_done
-ENDPROC(blake2s_compress_arch)
+ENDPROC(blake2s_compress)


@@ -1,78 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BLAKE2s digest algorithm, ARM scalar implementation
*
* Copyright 2020 Google LLC
*/
#include <crypto/internal/blake2s.h>
#include <crypto/internal/hash.h>
#include <linux/module.h>
/* defined in blake2s-core.S */
EXPORT_SYMBOL(blake2s_compress_arch);
static int crypto_blake2s_update_arm(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
}
static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, blake2s_compress_arch);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_arm, \
.final = crypto_blake2s_final_arm, \
.descsize = sizeof(struct blake2s_state), \
}
static struct shash_alg blake2s_arm_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
};
static int __init blake2s_arm_mod_init(void)
{
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
crypto_register_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs)) : 0;
}
static void __exit blake2s_arm_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
crypto_unregister_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs));
}
module_init(blake2s_arm_mod_init);
module_exit(blake2s_arm_mod_exit);
MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-arm");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-arm");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-arm");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-arm");
EXPORT_SYMBOL(blake2s_compress);


@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BLAKE2s digest algorithm, ARM scalar implementation
*
* Copyright 2020 Google LLC
*/
#include <crypto/internal/blake2s.h>
#include <crypto/internal/hash.h>
#include <linux/module.h>
static int crypto_blake2s_update_arm(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_arm, \
.final = crypto_blake2s_final_arm, \
.descsize = sizeof(struct blake2s_state), \
}
static struct shash_alg blake2s_arm_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
};
static int __init blake2s_arm_mod_init(void)
{
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
crypto_register_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs)) : 0;
}
static void __exit blake2s_arm_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
crypto_unregister_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs));
}
module_init(blake2s_arm_mod_init);
module_exit(blake2s_arm_mod_exit);
MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-arm");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-arm");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-arm");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-arm");


@@ -28,7 +28,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_arm(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(key + 16);


@@ -14,5 +14,6 @@
typedef unsigned long cycles_t;
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
#endif
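[Editor's note: the new random_get_entropy() relies on the GNU a ?: b extension, where a is evaluated once and used unless it is zero, in which case b is used. A compilable miniature of the same construct:]

    #include <stdio.h>

    static unsigned long get_cycles_stub(void)
    {
            return 0;       /* pretend the cycle counter is unusable */
    }

    static unsigned long fallback(void)
    {
            return 42;      /* stands in for random_get_entropy_fallback() */
    }

    int main(void)
    {
            unsigned long e = get_cycles_stub() ?: fallback();

            printf("%lu\n", e);     /* prints 42 */
            return 0;
    }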


@@ -42,6 +42,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
return -ENOENT;
syscon = of_iomap(syscon_np, 0);
of_node_put(syscon_np);
if (!syscon)
return -ENOMEM;


@@ -379,6 +379,7 @@ static void __init cns3xxx_init(void)
/* De-Asscer SATA Reset */
cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA));
}
of_node_put(dn);
dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci");
if (of_device_is_available(dn)) {
@@ -392,6 +393,7 @@
cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO));
cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO));
}
of_node_put(dn);
pm_power_off = cns3xxx_power_off;


@@ -131,6 +131,7 @@ static void exynos_map_pmu(void)
np = of_find_matching_node(NULL, exynos_dt_pmu_match);
if (np)
pmu_base_addr = of_iomap(np, 0);
of_node_put(np);
}
static void __init exynos_init_irq(void)


@@ -61,11 +61,12 @@ out:
unsigned long __pfn_to_mfn(unsigned long pfn)
{
-struct rb_node *n = phys_to_mach.rb_node;
+struct rb_node *n;
struct xen_p2m_entry *entry;
unsigned long irqflags;
read_lock_irqsave(&p2m_lock, irqflags);
+n = phys_to_mach.rb_node;
while (n) {
entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (entry->pfn <= pfn &&
@@ -151,10 +152,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
int rc;
unsigned long irqflags;
struct xen_p2m_entry *p2m_entry;
-struct rb_node *n = phys_to_mach.rb_node;
+struct rb_node *n;
if (mfn == INVALID_P2M_ENTRY) {
write_lock_irqsave(&p2m_lock, irqflags);
+n = phys_to_mach.rb_node;
while (n) {
p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (p2m_entry->pfn <= pfn &&


@@ -24,7 +24,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_arm64(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(key + 16);


@@ -72,7 +72,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long pc = rec->ip;
u32 old, new;
-long offset = (long)pc - (long)addr;
+long offset = (long)addr - (long)pc;
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
@@ -151,7 +151,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
bool validate = true;
u32 old = 0, new;
long offset = (long)pc - (long)addr;
long offset = (long)addr - (long)pc;
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
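[Editor's note: the two hunks above flip the operands because an arm64 B/BL encodes destination minus PC, so that is the quantity the ±128 MiB check must test. A condensed illustration, names illustrative:]

    #include <stdbool.h>
    #include <stdint.h>

    #define SZ_128M (128 * 1024 * 1024LL)

    /* Can a direct branch at 'pc' reach 'addr'?  The fixed code computes
     * addr - pc (destination minus branch site), not the reverse. */
    static bool branch_in_range(uint64_t pc, uint64_t addr)
    {
            int64_t offset = (int64_t)(addr - pc);

            return offset >= -SZ_128M && offset < SZ_128M;
    }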


@@ -39,6 +39,7 @@ get_cycles (void)
ret = ia64_getreg(_IA64_REG_AR_ITC);
return ret;
}
#define get_cycles get_cycles
extern void ia64_cpu_local_tick (void);
extern unsigned long long ia64_native_sched_clock (void);


@@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
{
if (mach_random_get_entropy)
return mach_random_get_entropy();
-return 0;
+return random_get_entropy_fallback();
}
#define random_get_entropy random_get_entropy


@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_mips(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(key + 16);


@@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
else
return 0; /* no usable counter */
}
#define get_cycles get_cycles
/*
* Like get_cycles - but where c0_count is not available we desperately
* use c0_random in an attempt to get at least a little bit of entropy.
*
* R6000 and R6000A neither have a count register nor a random register.
* That leaves no entropy source in the CPU itself.
*/
static inline unsigned long random_get_entropy(void)
{
unsigned int prid = read_c0_prid();
unsigned int imp = prid & PRID_IMP_MASK;
unsigned int c0_random;
-if (can_use_mips_counter(prid))
+if (can_use_mips_counter(read_c0_prid()))
return read_c0_count();
else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
return read_c0_random();
if (cpu_has_3kex)
c0_random = (read_c0_random() >> 8) & 0x3f;
else
return 0; /* no usable register */
c0_random = read_c0_random() & 0x3f;
return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
}
#define random_get_entropy random_get_entropy


@@ -653,8 +653,6 @@ static int icu_get_irq(unsigned int irq)
printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
atomic_inc(&irq_err_count);
return -1;
}


@@ -20,5 +20,8 @@
typedef unsigned long cycles_t;
extern cycles_t get_cycles(void);
#define get_cycles get_cycles
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
#endif


@@ -16,5 +16,6 @@ static inline cycles_t get_cycles (void)
{
return mfctl(16);
}
#define get_cycles get_cycles
#endif


@@ -6,27 +6,28 @@
#include <asm/machdep.h>
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool arch_get_random_long(unsigned long *v)
{
-return 0;
+return false;
}
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool arch_get_random_int(unsigned int *v)
{
-return 0;
+return false;
}
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool arch_get_random_seed_long(unsigned long *v)
{
if (ppc_md.get_random_seed)
return ppc_md.get_random_seed(v);
-return 0;
+return false;
}
-static inline int arch_get_random_seed_int(unsigned int *v)
+static inline bool arch_get_random_seed_int(unsigned int *v)
{
unsigned long val;
-int rc;
+bool rc;
rc = arch_get_random_seed_long(&val);
if (rc)
@@ -34,16 +35,6 @@ static inline int arch_get_random_seed_int(unsigned int *v)
return rc;
}
-static inline int arch_has_random(void)
-{
-return 0;
-}
-static inline int arch_has_random_seed(void)
-{
-return !!ppc_md.get_random_seed;
-}
#endif /* CONFIG_ARCH_RANDOM */
#ifdef CONFIG_PPC_POWERNV


@@ -207,6 +207,7 @@
#define PPC_INST_ICBT 0x7c00002c
#define PPC_INST_ICSWX 0x7c00032d
#define PPC_INST_ICSWEPX 0x7c00076d
#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
@@ -424,6 +425,7 @@
__PPC_RA(a) | __PPC_RB(b))
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
__PPC_RA(a) | __PPC_RB(b))
#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))


@@ -50,6 +50,7 @@ static inline cycles_t get_cycles(void)
return ret;
#endif
}
#define get_cycles get_cycles
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TIMEX_H */


@@ -133,7 +133,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
-DSSALL
+PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
CURRENT_THREAD_INFO(r9, r1)


@@ -108,7 +108,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
/* Stop DST streams */
BEGIN_FTR_SECTION
-DSSALL
+PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -305,7 +305,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
isync
/* Stop DST streams */
-DSSALL
+PPC_DSSALL
sync
/* Get the current enable bit of the L3CR into r4 */
@@ -414,7 +414,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
_GLOBAL(__flush_disable_L1)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
-DSSALL
+PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync


@@ -1731,7 +1731,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
tm_reclaim_current(0);
#endif
-memset(regs->gpr, 0, sizeof(regs->gpr));
+memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
regs->ctr = 0;
regs->link = 0;
regs->xer = 0;
@@ -2017,12 +2017,12 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
do {
-sp = *(unsigned long *)sp;
+sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
p->state == TASK_RUNNING)
return 0;
if (count > 0) {
-ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
+ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
if (!in_sched_functions(ip))
return ip;
}


@@ -1105,7 +1105,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = {
{ "get-time-of-day", -1, -1, -1, -1, -1 },
{ "ibm,get-vpd", -1, 0, -1, 1, 2 },
{ "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
{ "ibm,platform-dump", -1, 4, 5, -1, -1 },
{ "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */
{ "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
{ "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
{ "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
@@ -1152,6 +1152,15 @@ static bool block_rtas_call(int token, int nargs,
size = 1;
end = base + size - 1;
/*
* Special case for ibm,platform-dump - NULL buffer
* address is used to indicate end of dump processing
*/
if (!strcmp(f->name, "ibm,platform-dump") &&
base == 0)
return false;
if (!in_rmo_buf(base, end))
goto err;
}


@@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
-DSSALL
+PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
sync


@@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
_GLOBAL(swsusp_arch_resume)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
-DSSALL
+PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync


@@ -83,7 +83,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* context
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
asm volatile (PPC_DSSALL);
if (new_on_cpu)
radix_kvm_prefetch_workaround(next);


@@ -53,7 +53,7 @@ flush_disable_75x:
/* Stop DST streams */
BEGIN_FTR_SECTION
-DSSALL
+PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -201,7 +201,7 @@ flush_disable_745x:
isync
/* Stop prefetch streams */
-DSSALL
+PPC_DSSALL
sync
/* Disable L2 prefetching */


@@ -30,4 +30,6 @@ extern void opal_event_shutdown(void);
bool cpu_core_split_required(void);
void pnv_rng_init(void);
#endif /* _POWERNV_H */


@@ -21,6 +21,7 @@
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include "powernv.h"
#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
@@ -32,7 +33,6 @@ struct powernv_rng {
static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
int powernv_hwrng_present(void)
{
struct powernv_rng *rng;
@@ -102,9 +102,6 @@ static int initialise_darn(void)
return 0;
}
}
pr_warn("Unable to use DARN for get_random_seed()\n");
return -EIO;
}
@@ -167,32 +164,55 @@ static __init int rng_create(struct device_node *dn)
rng_init_per_cpu(rng, dn);
pr_info_once("Registering arch random hook.\n");
ppc_md.get_random_seed = powernv_get_random_long;
return 0;
}
-static __init int rng_init(void)
+static int __init pnv_get_random_long_early(unsigned long *v)
{
struct device_node *dn;
int rc;
if (!slab_is_available())
return 0;
if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early,
NULL) != pnv_get_random_long_early)
return 0;
for_each_compatible_node(dn, NULL, "ibm,power-rng") {
rc = rng_create(dn);
if (rc) {
pr_err("Failed creating rng for %pOF (%d).\n",
dn, rc);
if (rng_create(dn))
continue;
}
/* Create devices for hwrng driver */
of_platform_device_create(dn, NULL, NULL);
}
initialise_darn();
if (!ppc_md.get_random_seed)
return 0;
return ppc_md.get_random_seed(v);
}
void __init pnv_rng_init(void)
{
struct device_node *dn;
/* Prefer darn over the rest. */
if (!initialise_darn())
return;
dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng");
if (dn)
ppc_md.get_random_seed = pnv_get_random_long_early;
of_node_put(dn);
}
static int __init pnv_rng_late_init(void)
{
unsigned long v;
/* In case it wasn't called during init for some other reason. */
if (ppc_md.get_random_seed == pnv_get_random_long_early)
pnv_get_random_long_early(&v);
return 0;
}
-machine_subsys_initcall(powernv, rng_init);
+machine_subsys_initcall(powernv, pnv_rng_late_init);
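[Editor's note: the cmpxchg() in pnv_get_random_long_early() above is a one-shot handoff. Exactly one caller wins the swap and walks the device tree; concurrent or repeat callers bail out immediately. The same idiom in portable C11, names illustrative:]

    #include <stdatomic.h>
    #include <stdio.h>

    typedef int (*seed_fn)(unsigned long *);

    static int early_seed(unsigned long *v);
    static _Atomic seed_fn hook = early_seed;

    static int early_seed(unsigned long *v)
    {
            seed_fn expected = early_seed;

            /* Only the first caller that swaps the pointer out runs setup. */
            if (!atomic_compare_exchange_strong(&hook, &expected, NULL))
                    return 0;

            puts("one-time initialisation runs exactly once");
            *v = 1;
            return 1;
    }

    int main(void)
    {
            unsigned long v;

            early_seed(&v);
            early_seed(&v); /* no-op: the hook was already consumed */
            return 0;
    }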


@@ -171,6 +171,8 @@ static void __init pnv_setup_arch(void)
powersave_nap = 1;
/* XXX PMCS */
pnv_rng_init();
}
static void __init pnv_init(void)


@@ -110,4 +110,6 @@ int dlpar_workqueue_init(void);
void pseries_setup_rfi_flush(void);
void pseries_rng_init(void);
#endif /* _PSERIES_PSERIES_H */


@@ -14,6 +14,7 @@
#include <asm/archrandom.h>
#include <asm/machdep.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
static int pseries_get_random_long(unsigned long *v)
@@ -28,19 +29,13 @@ static int pseries_get_random_long(unsigned long *v)
return 0;
}
-static __init int rng_init(void)
+void __init pseries_rng_init(void)
{
struct device_node *dn;
dn = of_find_compatible_node(NULL, NULL, "ibm,random");
if (!dn)
-return -ENODEV;
-pr_info("Registering arch random hook.\n");
+return;
ppc_md.get_random_seed = pseries_get_random_long;
of_node_put(dn);
-return 0;
}
-machine_subsys_initcall(pseries, rng_init);


@@ -792,6 +792,7 @@ static void __init pSeries_setup_arch(void)
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
pseries_rng_init();
}
static void pseries_panic(char *str)


@@ -30,6 +30,8 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
struct task_struct;
struct pt_regs;


@@ -2,126 +2,17 @@
/*
* s390 arch random implementation.
*
- * Copyright IBM Corp. 2017, 2018
+ * Copyright IBM Corp. 2017, 2020
* Author(s): Harald Freudenberger
*
* The s390_arch_random_generate() function may be called from random.c
* in interrupt context. So this implementation does the best to be very
* fast. There is a buffer of random data which is asynchronously checked
* and filled by a workqueue thread.
* If there are enough bytes in the buffer the s390_arch_random_generate()
* just delivers these bytes. Otherwise false is returned until the
* worker thread refills the buffer.
* The worker fills the rng buffer by pulling fresh entropy from the
* high quality (but slow) true hardware random generator. This entropy
is then spread over the buffer with a pseudo random generator (PRNG).
* As the arch_get_random_seed_long() fetches 8 bytes and the calling
* function add_interrupt_randomness() counts this as 1 bit entropy the
* distribution needs to make sure there is in fact 1 bit entropy contained
* in 8 bytes of the buffer. The current values pull 32 byte entropy
and scatter this into a 2048 byte buffer. So 8 bytes in the buffer
* will contain 1 bit of entropy.
* The worker thread is rescheduled based on the charge level of the
* buffer but at least with 500 ms delay to avoid too much CPU consumption.
* So the max. amount of rng data delivered via arch_get_random_seed is
* limited to 4k bytes per second.
*/
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <asm/cpacf.h>
DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
EXPORT_SYMBOL(s390_arch_random_counter);
#define ARCH_REFILL_TICKS (HZ/2)
#define ARCH_PRNG_SEED_SIZE 32
#define ARCH_RNG_BUF_SIZE 2048
static DEFINE_SPINLOCK(arch_rng_lock);
static u8 *arch_rng_buf;
static unsigned int arch_rng_buf_idx;
static void arch_rng_refill_buffer(struct work_struct *);
static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
/* max hunk is ARCH_RNG_BUF_SIZE */
if (nbytes > ARCH_RNG_BUF_SIZE)
return false;
/* lock rng buffer */
if (!spin_trylock(&arch_rng_lock))
return false;
/* try to resolve the requested amount of bytes from the buffer */
arch_rng_buf_idx -= nbytes;
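/* arch_rng_buf_idx is unsigned, so an underflow here wraps above ARCH_RNG_BUF_SIZE and the bounds check below doubles as an "enough bytes buffered" test. */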
if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
atomic64_add(nbytes, &s390_arch_random_counter);
spin_unlock(&arch_rng_lock);
return true;
}
/* not enough bytes in rng buffer, refill is done asynchronously */
spin_unlock(&arch_rng_lock);
return false;
}
EXPORT_SYMBOL(s390_arch_random_generate);
static void arch_rng_refill_buffer(struct work_struct *unused)
{
unsigned int delay = ARCH_REFILL_TICKS;
spin_lock(&arch_rng_lock);
if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
/* buffer is exhausted and needs refill */
u8 seed[ARCH_PRNG_SEED_SIZE];
u8 prng_wa[240];
/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
cpacf_trng(NULL, 0, seed, sizeof(seed));
/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
memset(prng_wa, 0, sizeof(prng_wa));
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
&prng_wa, NULL, 0, seed, sizeof(seed));
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
&prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
}
delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
spin_unlock(&arch_rng_lock);
/* kick next check */
queue_delayed_work(system_long_wq, &arch_rng_work, delay);
}
static int __init s390_arch_random_init(void)
{
/* all the needed PRNO subfunctions available ? */
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
/* alloc arch random working buffer */
arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
if (!arch_rng_buf)
return -ENOMEM;
/* kick worker queue job to fill the random buffer */
queue_delayed_work(system_long_wq,
&arch_rng_work, ARCH_REFILL_TICKS);
/* enable arch random to the outside world */
static_branch_enable(&s390_arch_random_available);
}
return 0;
}
arch_initcall(s390_arch_random_init);


@ -2,7 +2,7 @@
/*
* Kernel interface for the s390 arch_random_* functions
*
* Copyright IBM Corp. 2017
* Copyright IBM Corp. 2017, 2020
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
@ -15,46 +15,37 @@
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <asm/cpacf.h>
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
extern atomic64_t s390_arch_random_counter;
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
static inline bool arch_has_random(void)
static inline bool __must_check arch_get_random_long(unsigned long *v)
{
return false;
}
static inline bool arch_has_random_seed(void)
static inline bool __must_check arch_get_random_int(unsigned int *v)
{
if (static_branch_likely(&s390_arch_random_available))
return false;
}
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
if (static_branch_likely(&s390_arch_random_available)) {
cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
atomic64_add(sizeof(*v), &s390_arch_random_counter);
return true;
}
return false;
}
static inline bool arch_get_random_long(unsigned long *v)
{
return false;
}
static inline bool arch_get_random_int(unsigned int *v)
{
return false;
}
static inline bool arch_get_random_seed_long(unsigned long *v)
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
if (static_branch_likely(&s390_arch_random_available)) {
return s390_arch_random_generate((u8 *)v, sizeof(*v));
}
return false;
}
static inline bool arch_get_random_seed_int(unsigned int *v)
{
if (static_branch_likely(&s390_arch_random_available)) {
return s390_arch_random_generate((u8 *)v, sizeof(*v));
cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
atomic64_add(sizeof(*v), &s390_arch_random_counter);
return true;
}
return false;
}


@ -9,6 +9,8 @@
#ifndef _S390_KEXEC_H
#define _S390_KEXEC_H
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
@ -69,4 +71,12 @@ int *kexec_file_update_kernel(struct kimage *iamge,
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
#endif
#endif /*_S390_KEXEC_H */


@ -177,6 +177,7 @@ static inline cycles_t get_cycles(void)
{
return (cycles_t) get_tod_clock() >> 2;
}
#define get_cycles get_cycles
int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);


@ -851,6 +851,11 @@ static void __init setup_randomness(void)
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
#ifdef CONFIG_ARCH_RANDOM
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
static_branch_enable(&s390_arch_random_available);
#endif
}
/*


@ -716,7 +716,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}


@ -9,8 +9,6 @@
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
/* XXX Maybe do something better at some point... -DaveM */
typedef unsigned long cycles_t;
#define get_cycles() (0)
#include <asm-generic/timex.h>
#endif


@ -2,13 +2,8 @@
#ifndef __UM_TIMEX_H
#define __UM_TIMEX_H
typedef unsigned long cycles_t;
static inline cycles_t get_cycles (void)
{
return 0;
}
#define CLOCK_TICK_RATE (HZ)
#include <asm-generic/timex.h>
#endif


@ -62,6 +62,7 @@ ifeq ($(avx_supported),yes)
obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o
obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
obj-$(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) += libblake2s-x86_64.o
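# Note: $(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) evaluates to y for both y and m, so the library object is always built into the kernel image whenever the option is enabled at all.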
endif
# These modules require assembler to support AVX2.
@ -95,7 +96,9 @@ aegis256-aesni-y := aegis256-aesni-asm.o aegis256-aesni-glue.o
morus640-sse2-y := morus640-sse2-asm.o morus640-sse2-glue.o
morus1280-sse2-y := morus1280-sse2-asm.o morus1280-sse2-glue.o
blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
blake2s-x86_64-y := blake2s-shash.o
libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o
poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),)
targets += poly1305-x86_64-cryptogams.S


@ -4,7 +4,6 @@
*/
#include <crypto/internal/blake2s.h>
#include <crypto/internal/hash.h>
#include <linux/types.h>
#include <linux/jump_label.h>
@ -26,9 +25,8 @@ asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
void blake2s_compress_arch(struct blake2s_state *state,
const u8 *block, size_t nblocks,
const u32 inc)
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
/* SIMD disables preemption, so relax after processing each page. */
BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
@ -54,48 +52,11 @@ void blake2s_compress_arch(struct blake2s_state *state,
block += blocks * BLAKE2S_BLOCK_SIZE;
} while (nblocks);
}
EXPORT_SYMBOL(blake2s_compress_arch);
static int crypto_blake2s_update_x86(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
}
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, blake2s_compress_arch);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_x86, \
.final = crypto_blake2s_final_x86, \
.descsize = sizeof(struct blake2s_state), \
}
static struct shash_alg blake2s_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
};
EXPORT_SYMBOL(blake2s_compress);
static int __init blake2s_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_SSSE3))
return 0;
if (boot_cpu_has(X86_FEATURE_SSSE3))
static_branch_enable(&blake2s_use_ssse3);
if (IS_ENABLED(CONFIG_AS_AVX512) &&
@ -107,26 +68,9 @@ static int __init blake2s_mod_init(void)
XFEATURE_MASK_AVX512, NULL))
static_branch_enable(&blake2s_use_avx512);
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
crypto_register_shashes(blake2s_algs,
ARRAY_SIZE(blake2s_algs)) : 0;
}
static void __exit blake2s_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
return 0;
}
module_init(blake2s_mod_init);
module_exit(blake2s_mod_exit);
MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-x86");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-x86");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-x86");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-x86");
MODULE_LICENSE("GPL v2");


@ -0,0 +1,77 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#include <crypto/internal/blake2s.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
static int crypto_blake2s_update_x86(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_x86, \
.final = crypto_blake2s_final_x86, \
.descsize = sizeof(struct blake2s_state), \
}
static struct shash_alg blake2s_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
};
static int __init blake2s_mod_init(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
return 0;
}
static void __exit blake2s_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
}
module_init(blake2s_mod_init);
module_exit(blake2s_mod_exit);
MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-x86");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-x86");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-x86");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-x86");
MODULE_LICENSE("GPL v2");


@ -15,7 +15,7 @@
#include <asm/intel-family.h>
asmlinkage void poly1305_init_x86_64(void *ctx,
const u8 key[POLY1305_KEY_SIZE]);
const u8 key[POLY1305_BLOCK_SIZE]);
asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
const size_t len, const u32 padbit);
asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
@ -80,7 +80,7 @@ static void convert_to_base2_64(void *ctx)
state->is_base2_26 = 0;
}
static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
{
poly1305_init_x86_64(ctx, key);
}
@ -128,7 +128,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
poly1305_emit_avx(ctx, mac, nonce);
}
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_simd_init(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(&key[16]);
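The POLY1305_KEY_SIZE -> POLY1305_BLOCK_SIZE change above matches how the 32-byte one-time key is split: the low-level init routines consume only the first 16 bytes (the polynomial key r), while the trailing 16 bytes (the nonce s) are stored separately by the wrapper, as the get_unaligned_le32(&key[16]) line shows.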


@ -86,10 +86,6 @@ static inline bool rdseed_int(unsigned int *v)
return ok;
}
/* Conditional execution based on CPU type */
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
/*
* These are the generic interfaces; they must not be declared if the
* stubs in <linux/random.h> are to be invoked,
@ -99,22 +95,22 @@ static inline bool rdseed_int(unsigned int *v)
static inline bool arch_get_random_long(unsigned long *v)
{
return arch_has_random() ? rdrand_long(v) : false;
return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_long(v) : false;
}
static inline bool arch_get_random_int(unsigned int *v)
{
return arch_has_random() ? rdrand_int(v) : false;
return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_int(v) : false;
}
static inline bool arch_get_random_seed_long(unsigned long *v)
{
return arch_has_random_seed() ? rdseed_long(v) : false;
return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_long(v) : false;
}
static inline bool arch_get_random_seed_int(unsigned int *v)
{
return arch_has_random_seed() ? rdseed_int(v) : false;
return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_int(v) : false;
}
extern void x86_init_rdrand(struct cpuinfo_x86 *c);


@ -394,5 +394,6 @@
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#endif /* _ASM_X86_CPUFEATURES_H */


@ -64,6 +64,14 @@
#define INTEL_FAM6_COMETLAKE 0xA5
#define INTEL_FAM6_COMETLAKE_L 0xA6
#define INTEL_FAM6_ROCKETLAKE 0xA7
/* Hybrid Core/Atom Processors */
#define INTEL_FAM6_LAKEFIELD 0x8A
#define INTEL_FAM6_ALDERLAKE 0x97
#define INTEL_FAM6_ALDERLAKE_L 0x9A
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
@ -83,7 +91,10 @@
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */
#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */
/* Xeon Phi */


@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__
#include <linux/string.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@ -217,6 +218,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
#endif
#endif
typedef void crash_vmclear_fn(void);


@ -96,6 +96,30 @@
* Not susceptible to
* TSX Async Abort (TAA) vulnerabilities.
*/
#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /*
* Not susceptible to SBDR and SSDP
* variants of Processor MMIO stale data
* vulnerabilities.
*/
#define ARCH_CAP_FBSDP_NO BIT(14) /*
* Not susceptible to FBSDP variant of
* Processor MMIO stale data
* vulnerabilities.
*/
#define ARCH_CAP_PSDP_NO BIT(15) /*
* Not susceptible to PSDP variant of
* Processor MMIO stale data
* vulnerabilities.
*/
#define ARCH_CAP_FB_CLEAR BIT(17) /*
* VERW clears CPU fill buffer
* even on MDS_NO CPUs.
*/
#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /*
* MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
* bit available to control VERW
* behavior.
*/
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@ -113,6 +137,7 @@
/* SRBDS support */
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
#define RNGDS_MITG_DIS BIT(0)
#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175


@ -323,6 +323,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
#include <asm/segment.h>
/**


@ -5,6 +5,15 @@
#include <asm/processor.h>
#include <asm/tsc.h>
static inline unsigned long random_get_entropy(void)
{
if (!IS_ENABLED(CONFIG_X86_TSC) &&
!cpu_feature_enabled(X86_FEATURE_TSC))
return random_get_entropy_fallback();
return rdtsc();
}
#define random_get_entropy random_get_entropy
/* Assume we use the PIT time source for the clock tick */
#define CLOCK_TICK_RATE PIT_TICK_RATE


@ -22,13 +22,12 @@ extern void disable_TSC(void);
static inline cycles_t get_cycles(void)
{
#ifndef CONFIG_X86_TSC
if (!boot_cpu_has(X86_FEATURE_TSC))
if (!IS_ENABLED(CONFIG_X86_TSC) &&
!cpu_feature_enabled(X86_FEATURE_TSC))
return 0;
#endif
return rdtsc();
}
#define get_cycles get_cycles
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);


@ -40,8 +40,10 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
@ -76,6 +78,10 @@ EXPORT_SYMBOL_GPL(mds_user_clear);
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
void __init check_bugs(void)
{
identify_boot_cpu();
@ -108,16 +114,9 @@ void __init check_bugs(void)
spectre_v2_select_mitigation();
ssb_select_mitigation();
l1tf_select_mitigation();
mds_select_mitigation();
taa_select_mitigation();
md_clear_select_mitigation();
srbds_select_mitigation();
/*
* As MDS and TAA mitigations are inter-related, print MDS
* mitigation until after TAA mitigation selection is done.
*/
mds_print_mitigation();
arch_smt_update();
#ifdef CONFIG_X86_32
@ -257,14 +256,6 @@ static void __init mds_select_mitigation(void)
}
}
static void __init mds_print_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
return;
pr_info("%s\n", mds_strings[mds_mitigation]);
}
static int __init mds_cmdline(char *str)
{
if (!boot_cpu_has_bug(X86_BUG_MDS))
@ -312,7 +303,7 @@ static void __init taa_select_mitigation(void)
/* TSX previously disabled by tsx=off */
if (!boot_cpu_has(X86_FEATURE_RTM)) {
taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
goto out;
return;
}
if (cpu_mitigations_off()) {
@ -326,7 +317,7 @@ static void __init taa_select_mitigation(void)
*/
if (taa_mitigation == TAA_MITIGATION_OFF &&
mds_mitigation == MDS_MITIGATION_OFF)
goto out;
return;
if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
taa_mitigation = TAA_MITIGATION_VERW;
@ -358,18 +349,6 @@ static void __init taa_select_mitigation(void)
if (taa_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
/*
* Update MDS mitigation, if necessary, as the mds_user_clear is
* now enabled for TAA mitigation.
*/
if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) {
mds_mitigation = MDS_MITIGATION_FULL;
mds_select_mitigation();
}
out:
pr_info("%s\n", taa_strings[taa_mitigation]);
}
static int __init tsx_async_abort_parse_cmdline(char *str)
@ -393,6 +372,151 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "MMIO Stale Data: " fmt
enum mmio_mitigations {
MMIO_MITIGATION_OFF,
MMIO_MITIGATION_UCODE_NEEDED,
MMIO_MITIGATION_VERW,
};
/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;
static const char * const mmio_strings[] = {
[MMIO_MITIGATION_OFF] = "Vulnerable",
[MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
[MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
};
static void __init mmio_select_mitigation(void)
{
u64 ia32_cap;
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
cpu_mitigations_off()) {
mmio_mitigation = MMIO_MITIGATION_OFF;
return;
}
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
ia32_cap = x86_read_arch_cap_msr();
/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
*/
if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
boot_cpu_has(X86_FEATURE_RTM)))
static_branch_enable(&mds_user_clear);
else
static_branch_enable(&mmio_stale_data_clear);
/*
* If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
* be propagated to uncore buffers, clearing the Fill buffers on idle
* is required irrespective of SMT state.
*/
if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
/*
* Check if the system has the right microcode.
*
* CPU Fill buffer clear mitigation is enumerated by either an explicit
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
* affected systems.
*/
if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
!(ia32_cap & ARCH_CAP_MDS_NO)))
mmio_mitigation = MMIO_MITIGATION_VERW;
else
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
if (mmio_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
}
static int __init mmio_stale_data_parse_cmdline(char *str)
{
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
return 0;
if (!str)
return -EINVAL;
if (!strcmp(str, "off")) {
mmio_mitigation = MMIO_MITIGATION_OFF;
} else if (!strcmp(str, "full")) {
mmio_mitigation = MMIO_MITIGATION_VERW;
} else if (!strcmp(str, "full,nosmt")) {
mmio_mitigation = MMIO_MITIGATION_VERW;
mmio_nosmt = true;
}
return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
static void __init md_clear_update_mitigation(void)
{
if (cpu_mitigations_off())
return;
if (!static_key_enabled(&mds_user_clear))
goto out;
/*
* mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
* mitigation, if necessary.
*/
if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) {
mds_mitigation = MDS_MITIGATION_FULL;
mds_select_mitigation();
}
if (taa_mitigation == TAA_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_TAA)) {
taa_mitigation = TAA_MITIGATION_VERW;
taa_select_mitigation();
}
if (mmio_mitigation == MMIO_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
mmio_mitigation = MMIO_MITIGATION_VERW;
mmio_select_mitigation();
}
out:
if (boot_cpu_has_bug(X86_BUG_MDS))
pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
if (boot_cpu_has_bug(X86_BUG_TAA))
pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
}
static void __init md_clear_select_mitigation(void)
{
mds_select_mitigation();
taa_select_mitigation();
mmio_select_mitigation();
/*
* As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
* and print their mitigation after MDS, TAA and MMIO Stale Data
* mitigation selection is done.
*/
md_clear_update_mitigation();
}
#undef pr_fmt
#define pr_fmt(fmt) "SRBDS: " fmt
@ -454,11 +578,13 @@ static void __init srbds_select_mitigation(void)
return;
/*
* Check to see if this is one of the MDS_NO systems supporting
* TSX that are only exposed to SRBDS when TSX is enabled.
* Check to see if this is one of the MDS_NO systems supporting TSX that
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
* by Processor MMIO Stale Data vulnerability.
*/
ia32_cap = x86_read_arch_cap_msr();
if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
@ -1066,6 +1192,8 @@ static void update_indir_branch_cond(void)
/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
u64 ia32_cap = x86_read_arch_cap_msr();
/*
* Enable the idle clearing if SMT is active on CPUs which are
* affected only by MSBDS and not any other MDS variant.
@ -1077,14 +1205,17 @@ static void update_mds_branch_idle(void)
if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
return;
if (sched_smt_active())
if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
else
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(ia32_cap & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
}
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
void arch_smt_update(void)
{
@ -1129,6 +1260,16 @@ void arch_smt_update(void)
break;
}
switch (mmio_mitigation) {
case MMIO_MITIGATION_VERW:
case MMIO_MITIGATION_UCODE_NEEDED:
if (sched_smt_active())
pr_warn_once(MMIO_MSG_SMT);
break;
case MMIO_MITIGATION_OFF:
break;
}
mutex_unlock(&spec_ctrl_mutex);
}
@ -1680,6 +1821,20 @@ static ssize_t tsx_async_abort_show_state(char *buf)
sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t mmio_stale_data_show_state(char *buf)
{
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
return sysfs_emit(buf, "%s; SMT Host state unknown\n",
mmio_strings[mmio_mitigation]);
}
return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
}
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
@ -1780,6 +1935,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_SRBDS:
return srbds_show_state(buf);
case X86_BUG_MMIO_STALE_DATA:
return mmio_stale_data_show_state(buf);
default:
break;
}
@ -1831,4 +1989,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
{
return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
#endif


@ -1019,18 +1019,42 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
X86_FEATURE_ANY, issues)
#define SRBDS BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS BIT(2)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPINGS(0x3, 0x5), MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) |
BIT(7) | BIT(0xB), MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0xC), SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0xD), SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0x8), SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0x8), SRBDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPINGS(0x1, 0x1), MMIO),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO),
VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS),
{}
};
@ -1051,6 +1075,13 @@ u64 x86_read_arch_cap_msr(void)
return ia32_cap;
}
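/* A CPU is immune to MMIO Stale Data only when all three leak variants (FBSDP, PSDP and SBDR/SSDP) are enumerated as not affected. */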
static bool arch_cap_mmio_immune(u64 ia32_cap)
{
return (ia32_cap & ARCH_CAP_FBSDP_NO &&
ia32_cap & ARCH_CAP_PSDP_NO &&
ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
}
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = x86_read_arch_cap_msr();
@ -1102,12 +1133,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
/*
* SRBDS affects CPUs which support RDRAND or RDSEED and are listed
* in the vulnerability blacklist.
*
* Some of the implications and mitigation of Shared Buffers Data
* Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
* SRBDS.
*/
if ((cpu_has(c, X86_FEATURE_RDRAND) ||
cpu_has(c, X86_FEATURE_RDSEED)) &&
cpu_matches(cpu_vuln_blacklist, SRBDS))
cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
setup_force_cpu_bug(X86_BUG_SRBDS);
/*
* Processor MMIO Stale Data bug enumeration
*
* Affected CPU list is generally enough to enumerate the vulnerability,
* but for virtualization case check for ARCH_CAP MSR bits also, VMM may
* not want the guest to enumerate the bug.
*/
if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
!arch_cap_mmio_immune(ia32_cap))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;


@ -212,6 +212,9 @@ static const struct {
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
/* Control for disabling CPU Fill buffer clear */
static bool __read_mostly vmx_fb_clear_ctrl_available;
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
struct page *page;
@ -1045,6 +1048,8 @@ struct vcpu_vmx {
u64 msr_ia32_feature_control;
u64 msr_ia32_feature_control_valid_bits;
u64 ept_pointer;
u64 msr_ia32_mcu_opt_ctrl;
bool disable_fb_clear;
};
enum segment_cache_field {
@ -2107,6 +2112,60 @@ static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
BUG_ON(error);
}
static void vmx_setup_fb_clear_ctrl(void)
{
u64 msr;
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
!boot_cpu_has_bug(X86_BUG_MDS) &&
!boot_cpu_has_bug(X86_BUG_TAA)) {
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
if (msr & ARCH_CAP_FB_CLEAR_CTRL)
vmx_fb_clear_ctrl_available = true;
}
}
static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
{
u64 msr;
if (!vmx->disable_fb_clear)
return;
rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
msr |= FB_CLEAR_DIS;
wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
/* Cache the MSR value to avoid reading it later */
vmx->msr_ia32_mcu_opt_ctrl = msr;
}
static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
{
if (!vmx->disable_fb_clear)
return;
vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
}
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
/*
* If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
* at VMEntry. Skip the MSR read/write when a guest has no use case to
* execute VERW.
*/
if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
vmx->disable_fb_clear = false;
}
static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@ -4317,6 +4376,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
ret = kvm_set_msr_common(vcpu, msr_info);
}
/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
vmx_update_fb_clear_dis(vcpu, vmx);
return ret;
}
@ -6761,6 +6824,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vpid_sync_context(vmx->vpid);
if (init_event)
vmx_clear_hlt(vcpu);
vmx_update_fb_clear_dis(vcpu, vmx);
}
/*
@ -10773,6 +10838,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mds_user_clear))
mds_clear_cpu_buffers();
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();
vmx_disable_fb_clear(vmx);
asm volatile (
/* Store host registers */
@ -10918,6 +10988,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
vmx_enable_fb_clear(vmx);
/*
* We do not use IBRS in the kernel. If this vCPU has used the
* SPEC_CTRL MSR it may have left it on; save the value and
@ -14623,8 +14695,11 @@ static int __init vmx_init(void)
}
}
vmx_setup_fb_clear_ctrl();
for_each_possible_cpu(cpu) {
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}


@ -1220,6 +1220,10 @@ u64 kvm_get_arch_capabilities(void)
/* KVM does not emulate MSR_IA32_TSX_CTRL. */
data &= ~ARCH_CAP_TSX_CTRL_MSR;
/* Guests don't need to know "Fill buffer clear control" exists */
data &= ~ARCH_CAP_FB_CLEAR_CTRL;
return data;
}


@ -30,10 +30,6 @@
extern unsigned long ccount_freq;
typedef unsigned long long cycles_t;
#define get_cycles() (0)
void local_timer_setup(unsigned cpu);
/*
@ -69,4 +65,6 @@ static inline void set_linux_timer (unsigned long ccompare)
WSR_CCOMPARE(LINUX_TIMER, ccompare);
}
#include <asm-generic/timex.h>
#endif /* _XTENSA_TIMEX_H */


@ -146,6 +146,7 @@ static void __init calibrate_ccount(void)
cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
if (cpu) {
clk = of_clk_get(cpu, 0);
of_node_put(cpu);
if (!IS_ERR(clk)) {
ccount_freq = clk_get_rate(clk);
return;


@ -152,6 +152,7 @@ static int __init machine_setup(void)
if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
update_local_mac(eth);
of_node_put(eth);
return 0;
}
arch_initcall(machine_setup);


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "blacklist.h"
const char __initdata *const blacklist_hashes[] = {
const char __initconst *const blacklist_hashes[] = {
#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST
, NULL
};


@ -1961,7 +1961,6 @@ config CRYPTO_USER_API_AEAD
config CRYPTO_HASH_INFO
bool
source "lib/crypto/Kconfig"
source "drivers/crypto/Kconfig"
source crypto/asymmetric_keys/Kconfig
source certs/Kconfig


@ -15,12 +15,12 @@
static int crypto_blake2s_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
return crypto_blake2s_update(desc, in, inlen, true);
}
static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, blake2s_compress_generic);
return crypto_blake2s_final(desc, out, true);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \


@ -219,6 +219,57 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
}
}
/*
* FIPS 140-2 continuous self test for the noise source
* The test is performed on the noise source input data. Thus, the function
* implicitly knows the size of the buffer to be equal to the security
* strength.
*
* Note, this function disregards the nonce trailing the entropy data during
* initial seeding.
*
* drbg->drbg_mutex must have been taken.
*
* @drbg DRBG handle
* @entropy buffer of seed data to be checked
*
* return:
* 0 on success
-EAGAIN when the CTRNG is not yet primed
* < 0 on error
*/
static int drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
{
unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
int ret = 0;
if (!IS_ENABLED(CONFIG_CRYPTO_FIPS))
return 0;
/* skip test if we test the overall system */
if (list_empty(&drbg->test_data.list))
return 0;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;
if (!drbg->fips_primed) {
/* Priming of FIPS test */
memcpy(drbg->prev, entropy, entropylen);
drbg->fips_primed = true;
/* priming: another round is needed */
return -EAGAIN;
}
ret = memcmp(drbg->prev, entropy, entropylen);
if (!ret)
panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, entropy, entropylen);
/* the test shall pass when the two values are not equal */
return 0;
}
/*
* Convert an integer into a byte representation of this integer.
* The byte representation is big-endian
@ -984,55 +1035,79 @@ static const struct drbg_state_ops drbg_hash_ops = {
******************************************************************/
static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
int reseed)
int reseed, enum drbg_seed_state new_seed_state)
{
int ret = drbg->d_ops->update(drbg, seed, reseed);
if (ret)
return ret;
drbg->seeded = true;
drbg->seeded = new_seed_state;
/* 10.1.1.2 / 10.1.1.3 step 5 */
drbg->reseed_ctr = 1;
switch (drbg->seeded) {
case DRBG_SEED_STATE_UNSEEDED:
/* Impossible, but handle it to silence compiler warnings. */
case DRBG_SEED_STATE_PARTIAL:
/*
* Require frequent reseeds until the seed source is
* fully initialized.
*/
drbg->reseed_threshold = 50;
break;
case DRBG_SEED_STATE_FULL:
/*
* Seed source has become fully initialized, frequent
* reseeds no longer required.
*/
drbg->reseed_threshold = drbg_max_requests(drbg);
break;
}
return ret;
}
static void drbg_async_seed(struct work_struct *work)
static inline int drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
{
int ret;
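/* Pull from get_random_bytes() until the FIPS continuous test passes; -EAGAIN only means the test is still priming, so retry. */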
do {
get_random_bytes(entropy, entropylen);
ret = drbg_fips_continuous_test(drbg, entropy);
if (ret && ret != -EAGAIN)
return ret;
} while (ret);
return 0;
}
static int drbg_seed_from_random(struct drbg_state *drbg)
{
struct drbg_string data;
LIST_HEAD(seedlist);
struct drbg_state *drbg = container_of(work, struct drbg_state,
seed_work);
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
unsigned char entropy[32];
int ret;
BUG_ON(!entropylen);
BUG_ON(entropylen > sizeof(entropy));
get_random_bytes(entropy, entropylen);
drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);
mutex_lock(&drbg->drbg_mutex);
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
/* If nonblocking pool is initialized, deactivate Jitter RNG */
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
/* Set seeded to false so that if __drbg_seed fails the
* next generate call will trigger a reseed.
*/
drbg->seeded = false;
__drbg_seed(drbg, &seedlist, true);
if (drbg->seeded)
drbg->reseed_threshold = drbg_max_requests(drbg);
mutex_unlock(&drbg->drbg_mutex);
ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
out:
memzero_explicit(entropy, entropylen);
return ret;
}
/*
@ -1054,6 +1129,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
/* 9.1 / 9.2 / 9.3.1 step 3 */
if (pers && pers->len > (drbg_max_addtl(drbg))) {
@ -1081,7 +1157,12 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
BUG_ON((entropylen * 2) > sizeof(entropy));
/* Get seed from in-kernel /dev/urandom */
get_random_bytes(entropy, entropylen);
if (!rng_is_initialized())
new_seed_state = DRBG_SEED_STATE_PARTIAL;
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
if (!drbg->jent) {
drbg_string_fill(&data1, entropy, entropylen);
@ -1094,7 +1175,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
entropylen);
if (ret) {
pr_devel("DRBG: jent failed with %d\n", ret);
return ret;
/*
* Do not treat the transient failure of the
* Jitter RNG as an error that needs to be
* reported. The combined number of the
* maximum reseed threshold times the maximum
* number of Jitter RNG transient errors is
* less than the reseed threshold required by
* SP800-90A allowing us to treat the
* transient errors as such.
*
* However, we mandate that at least the first
* seeding operation must succeed with the
* Jitter RNG.
*/
if (!reseed || ret != -EAGAIN)
goto out;
}
drbg_string_fill(&data1, entropy, entropylen * 2);
@ -1119,8 +1216,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
memset(drbg->C, 0, drbg_statelen(drbg));
}
ret = __drbg_seed(drbg, &seedlist, reseed);
ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
out:
memzero_explicit(entropy, entropylen * 2);
return ret;
@ -1142,6 +1240,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
kzfree(drbg->prev);
drbg->prev = NULL;
drbg->fips_primed = false;
}
}
/*
@ -1211,6 +1314,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
if (!drbg->prev)
goto fini;
drbg->fips_primed = false;
}
return 0;
fini:
@ -1283,19 +1394,25 @@ static int drbg_generate(struct drbg_state *drbg,
* here. The spec is a bit convoluted here, we make it simpler.
*/
if (drbg->reseed_threshold < drbg->reseed_ctr)
drbg->seeded = false;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
if (drbg->pr || !drbg->seeded) {
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
drbg->pr ? "true" : "false",
drbg->seeded ? "seeded" : "unseeded");
(drbg->seeded == DRBG_SEED_STATE_FULL ?
"seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
len = drbg_seed(drbg, addtl, true);
if (len)
goto err;
/* 9.3.1 step 7.4 */
addtl = NULL;
} else if (rng_is_initialized() &&
drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
len = drbg_seed_from_random(drbg);
if (len)
goto err;
}
if (addtl && 0 < addtl->len)
@ -1388,51 +1505,15 @@ static int drbg_generate_long(struct drbg_state *drbg,
return 0;
}
static void drbg_schedule_async_seed(struct random_ready_callback *rdy)
{
struct drbg_state *drbg = container_of(rdy, struct drbg_state,
random_ready);
schedule_work(&drbg->seed_work);
}
static int drbg_prepare_hrng(struct drbg_state *drbg)
{
int err;
/* We do not need an HRNG in test mode. */
if (list_empty(&drbg->test_data.list))
return 0;
INIT_WORK(&drbg->seed_work, drbg_async_seed);
drbg->random_ready.owner = THIS_MODULE;
drbg->random_ready.func = drbg_schedule_async_seed;
err = add_random_ready_callback(&drbg->random_ready);
switch (err) {
case 0:
break;
case -EALREADY:
err = 0;
/* fall through */
default:
drbg->random_ready.func = NULL;
return err;
}
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
/*
* Require frequent reseeds until the seed source is fully
* initialized.
*/
drbg->reseed_threshold = 50;
return err;
return 0;
}
/*
@ -1475,7 +1556,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
if (!drbg->core) {
drbg->core = &drbg_cores[coreref];
drbg->pr = pr;
drbg->seeded = false;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
drbg->reseed_threshold = drbg_max_requests(drbg);
ret = drbg_alloc_state(drbg);
@ -1526,12 +1607,9 @@ free_everything:
*/
static int drbg_uninstantiate(struct drbg_state *drbg)
{
if (drbg->random_ready.func) {
del_random_ready_callback(&drbg->random_ready);
cancel_work_sync(&drbg->seed_work);
if (!IS_ERR_OR_NULL(drbg->jent))
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
}
if (drbg->d_ops)
drbg->d_ops->crypto_fini(drbg);


@ -6253,7 +6253,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi,
int n_ports)
{
const struct ata_port_info *pi;
const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;
@ -6261,7 +6261,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
if (!host)
return NULL;
for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ppi[j])


@ -663,6 +663,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@ -672,6 +678,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@ -683,6 +690,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
NULL
};


@ -151,6 +151,10 @@ static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
static bool __read_mostly xen_blkif_trusted = true;
module_param_named(trusted, xen_blkif_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");
#define BLK_RING_SIZE(info) \
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
@ -211,6 +215,7 @@ struct blkfront_info
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
/* Number of 4KB segments handled */
@ -300,8 +305,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
if (!gnt_list_entry)
goto out_of_memory;
if (info->feature_persistent) {
granted_page = alloc_page(GFP_NOIO);
if (info->bounce) {
granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!granted_page) {
kfree(gnt_list_entry);
goto out_of_memory;
@ -320,7 +325,7 @@ out_of_memory:
list_for_each_entry_safe(gnt_list_entry, n,
&rinfo->grants, node) {
list_del(&gnt_list_entry->node);
if (info->feature_persistent)
if (info->bounce)
__free_page(gnt_list_entry->page);
kfree(gnt_list_entry);
i--;
@ -366,7 +371,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
if (info->feature_persistent)
if (info->bounce)
grant_foreign_access(gnt_list_entry, info);
else {
/* Grant access to the GFN passed by the caller */
@ -390,7 +395,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
if (!info->feature_persistent) {
if (!info->bounce) {
struct page *indirect_page;
/* Fetch a pre-allocated page to use for indirect grefs */
@ -705,7 +710,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
.grant_idx = 0,
.segments = NULL,
.rinfo = rinfo,
.need_copy = rq_data_dir(req) && info->feature_persistent,
.need_copy = rq_data_dir(req) && info->bounce,
};
/*
@ -1026,11 +1031,12 @@ static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s\n",
pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
info->max_indirect_segments ? "enabled;" : "disabled;");
info->max_indirect_segments ? "enabled;" : "disabled;",
"bounce buffer:", info->bounce ? "enabled" : "disabled;");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@ -1265,7 +1271,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
if (!list_empty(&rinfo->indirect_pages)) {
struct page *indirect_page, *n;
-		BUG_ON(info->feature_persistent);
+		BUG_ON(info->bounce);
 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
 			list_del(&indirect_page->lru);
 			__free_page(indirect_page);
@@ -1282,7 +1288,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
							  0, 0UL);
				rinfo->persistent_gnts_c--;
			}
-			if (info->feature_persistent)
+			if (info->bounce)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
@@ -1303,7 +1309,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
		for (j = 0; j < segs; j++) {
			persistent_gnt = rinfo->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			if (info->feature_persistent)
+			if (info->bounce)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
@@ -1493,7 +1499,7 @@ static int blkif_completion(unsigned long *id,
	data.s = s;
	num_sg = s->num_sg;

-	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+	if (bret->operation == BLKIF_OP_READ && info->bounce) {
		for_each_sg(s->sg, sg, num_sg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@@ -1552,7 +1558,7 @@ static int blkif_completion(unsigned long *id,
			 * Add the used indirect page back to the list of
			 * available pages for indirect grefs.
			 */
-			if (!info->feature_persistent) {
+			if (!info->bounce) {
				indirect_page = s->indirect_grants[i]->page;
				list_add(&indirect_page->lru, &rinfo->indirect_pages);
			}
@@ -1744,7 +1750,7 @@ static int setup_blkring(struct xenbus_device *dev,
	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = GRANT_INVALID_REF;

-	sring = alloc_pages_exact(ring_size, GFP_NOIO);
+	sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
@@ -1847,6 +1853,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
	if (!info)
		return -ENODEV;

+	/* Check if backend is trusted. */
+	info->bounce = !xen_blkif_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
					      "max-ring-page-order", 0);
	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -2273,17 +2283,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
	if (err)
		goto out_of_memory;

-	if (!info->feature_persistent && info->max_indirect_segments) {
+	if (!info->bounce && info->max_indirect_segments) {
		/*
-		 * We are using indirect descriptors but not persistent
-		 * grants, we need to allocate a set of pages that can be
+		 * We are using indirect descriptors but don't have a bounce
+		 * buffer, we need to allocate a set of pages that can be
		 * used for mapping indirect grefs
		 */
		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);

		BUG_ON(!list_empty(&rinfo->indirect_pages));
		for (i = 0; i < num; i++) {
-			struct page *indirect_page = alloc_page(GFP_KERNEL);
+			struct page *indirect_page = alloc_page(GFP_KERNEL |
+								__GFP_ZERO);
			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2375,6 +2386,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
	info->feature_persistent =
		!!xenbus_read_unsigned(info->xbdev->otherend,
				       "feature-persistent", 0);
+	if (info->feature_persistent)
+		info->bounce = true;

	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
						 "feature-max-indirect-segments", 0);
@@ -2750,6 +2763,13 @@ static void blkfront_delay_work(struct work_struct *work)
	struct blkfront_info *info;
	bool need_schedule_work = false;

+	/*
+	 * Note that when using bounce buffers but not persistent grants
+	 * there's no need to run blkfront_delay_work because grants are
+	 * revoked in blkif_completion or else an error is reported and the
+	 * connection is closed.
+	 */
+
	mutex_lock(&blkfront_mutex);

	list_for_each_entry(info, &info_list, info_list) {
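
For orientation, the hunks above implement data bouncing: when the backend is untrusted, the frontend never grants the backend the guest page that actually holds the I/O data, but copies the payload through a freshly zeroed page and grants that page instead (the __GFP_ZERO additions keep slack space from leaking stale memory). A minimal standalone C sketch of the idea, using a hypothetical helper name rather than xen-blkfront's actual code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Share a zeroed copy of the payload instead of the original page.
 * Zeroing mirrors the __GFP_ZERO additions above: slack space beyond
 * the payload must not leak stale memory to the other side. */
static void *bounce_for_sharing(const void *data, size_t len, size_t page_size)
{
	void *bounce;

	if (len > page_size)
		return NULL;
	bounce = calloc(1, page_size);		/* zeroed "bounce" page */
	if (!bounce)
		return NULL;
	memcpy(bounce, data, len);		/* expose only the payload */
	return bounce;				/* grant this, not 'data' */
}

int main(void)
{
	const char req[] = "block I/O payload";
	void *page = bounce_for_sharing(req, sizeof(req), 4096);

	printf("granting bounce page at %p\n", page);
	free(page);
	return 0;
}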


@@ -624,22 +624,24 @@ config RB5_GPIOS_ENABLE
	  signals, GPIO 60 is default set to low to connect GPIOs
	  140/145,116/117 to mezzanine connectors.
 endmenu

 config RANDOM_TRUST_CPU
-	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
-	depends on X86 || S390 || PPC
-	default n
+	bool "Initialize RNG using CPU RNG instructions"
+	default y
+	depends on ARCH_RANDOM
	help
-	  Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
-	  RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
-	  for the purposes of initializing Linux's CRNG. Since this is not
-	  something that can be independently audited, this amounts to trusting
-	  that CPU manufacturer (perhaps with the insistence or mandate
-	  of a Nation State's intelligence or law enforcement agencies)
-	  has not installed a hidden back door to compromise the CPU's
-	  random number generation facilities. This can also be configured
-	  at boot with "random.trust_cpu=on/off".
+	  Initialize the RNG using random numbers supplied by the CPU's
+	  RNG instructions (e.g. RDRAND), if supported and available. These
+	  random numbers are never used directly, but are rather hashed into
+	  the main input pool, and this happens regardless of whether or not
+	  this option is enabled. Instead, this option controls whether they
+	  are credited and hence can initialize the RNG. Additionally,
+	  other sources of randomness are always used, regardless of this
+	  setting. Enabling this implies trusting that the CPU can supply high
+	  quality and non-backdoored random numbers.
+
+	  Say Y here unless you have reason to mistrust your CPU or believe
+	  its RNG facilities may be faulty. This may also be configured at
+	  boot time with "random.trust_cpu=on/off".

 config OKL4_PIPE
	bool "OKL4 Pipe Driver"
@@ -689,10 +691,21 @@ config VSERVICES_VTTY_COUNT
	  This limit applies to both the client and server.

 config RANDOM_TRUST_BOOTLOADER
-	bool "Trust the bootloader to initialize Linux's CRNG"
+	bool "Initialize RNG using bootloader-supplied seed"
+	default y
	help
-	  Some bootloaders can provide entropy to increase the kernel's initial
-	  device randomness. Say Y here to assume the entropy provided by the
-	  booloader is trustworthy so it will be added to the kernel's entropy
-	  pool. Otherwise, say N here so it will be regarded as device input that
-	  only mixes the entropy pool.
+	  Initialize the RNG using a seed supplied by the bootloader or boot
+	  environment (e.g. EFI or a bootloader-generated device tree). This
+	  seed is not used directly, but is rather hashed into the main input
+	  pool, and this happens regardless of whether or not this option is
+	  enabled. Instead, this option controls whether the seed is credited
+	  and hence can initialize the RNG. Additionally, other sources of
+	  randomness are always used, regardless of this setting. Enabling
+	  this implies trusting that the bootloader can supply high quality and
+	  non-backdoored seeds.
+
+	  Say Y here unless you have reason to mistrust your bootloader or
+	  believe its RNG facilities may be faulty. This may also be configured
+	  at boot time with "random.trust_bootloader=on/off".

 endmenu
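
Both help texts describe the same policy: seed material is always mixed into the input pool, and the option (or the corresponding random.trust_cpu= / random.trust_bootloader= boot parameter) only decides whether that material is credited, i.e. allowed to count toward declaring the RNG initialized. A compilable sketch of that policy, using stand-in helper names rather than the kernel's real symbols:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the pool primitives; not the kernel's actual symbols. */
static void mix_into_pool(const void *seed, size_t len)
{
	(void)seed;
	printf("mixed %zu bytes into the input pool\n", len);
}

static void credit_bits(size_t bits)
{
	printf("credited %zu bits toward RNG init\n", bits);
}

/* The policy both options implement: always mix, credit only if trusted. */
static void ingest_seed(const void *seed, size_t len, bool trusted)
{
	mix_into_pool(seed, len);	/* unconditional: can only help */
	if (trusted)
		credit_bits(len * 8);	/* the knob the options control */
}

int main(void)
{
	unsigned char seed[16] = { 0 };

	ingest_seed(seed, sizeof(seed), true);	/* e.g. random.trust_cpu=on */
	ingest_seed(seed, sizeof(seed), false);	/* mixed but not credited */
	return 0;
}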


@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/hw_random.h>
+#include <linux/random.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/sched/signal.h>

File diff suppressed because it is too large.


@@ -230,8 +230,6 @@ static int giu_get_irq(unsigned int irq)
	printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
	       maskl, pendl, maskh, pendh);

-	atomic_inc(&irq_err_count);
-
	return -EINVAL;
 }


@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
	unsigned long *base = gpiochip_get_data(gc);
	const struct winbond_gpio_info *info;
	bool val;
+	int ret;

	winbond_gpio_get_info(&offset, &info);

-	val = winbond_sio_enter(*base);
-	if (val)
-		return val;
+	ret = winbond_sio_enter(*base);
+	if (ret)
+		return ret;

	winbond_sio_select_logical(*base, info->dev);
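
The bug fixed above is an int-to-bool truncation: winbond_sio_enter() reports failure as a negative errno, but storing that in a bool collapses any non-zero value to 1, so the function returned 1 (a plausible "high" GPIO reading) instead of the error code. A standalone demonstration of the failure mode, with enter_fails() as a hypothetical stand-in:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int enter_fails(void)
{
	return -ENODEV;		/* the kind of error winbond_sio_enter() reports */
}

int main(void)
{
	bool val = enter_fails();	/* buggy: -ENODEV truncates to true */
	int ret = enter_fails();	/* fixed: the errno value survives */

	/* Prints "as bool: 1, as int: -19" on Linux (ENODEV == 19). */
	printf("as bool: %d, as int: %d\n", val, ret);
	return 0;
}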


@@ -32,16 +32,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>

-/* drm_fb_helper.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
-#else
-static inline int drm_fb_helper_modinit(void)
-{
-	return 0;
-}
-#endif
-
 /* drm_dp_aux_dev.c */
 #ifdef CONFIG_DRM_DP_AUX_CHARDEV
 int drm_dp_aux_dev_init(void);


@@ -3270,24 +3270,3 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
	return 0;
 }
 EXPORT_SYMBOL(drm_fbdev_generic_setup);
-
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable console.
- */
-int __init drm_fb_helper_modinit(void)
-{
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
-	const char name[] = "fbcon";
-	struct module *fbcon;
-
-	mutex_lock(&module_mutex);
-	fbcon = find_module(name);
-	mutex_unlock(&module_mutex);
-
-	if (!fbcon)
-		request_module_nowait(name);
-#endif
-	return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_modinit);


@@ -63,19 +63,18 @@ MODULE_PARM_DESC(edid_firmware,
 static int __init drm_kms_helper_init(void)
 {
-	int ret;
+	/*
+	 * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+	 * but the module doesn't depend on any fb console symbols. At least
+	 * attempt to load fbcon to avoid leaving the system without a usable
+	 * console.
+	 */
+	if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
+	    IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
+	    !IS_ENABLED(CONFIG_EXPERT))
+		request_module_nowait("fbcon");

-	/* Call init functions from specific kms helpers here */
-	ret = drm_fb_helper_modinit();
-	if (ret < 0)
-		goto out;
-
-	ret = drm_dp_aux_dev_init();
-	if (ret < 0)
-		goto out;
-
-out:
-	return ret;
+	return drm_dp_aux_dev_init();
 }

 static void __exit drm_kms_helper_exit(void)
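
The inlined replacement leans on the kconfig.h test macros: IS_MODULE(x) is true only when an option is =m, while IS_ENABLED(x) is true for =y or =m, so the request_module_nowait() call compiles away entirely unless fbcon is actually built as a module. A standalone reproduction of how those macros distinguish =y, =m, and off (the CONFIG_DEMO_* names are invented for the demo; the macro bodies mirror the kernel's):

#include <stdio.h>

/* Kconfig defines CONFIG_FOO to 1 for =y and CONFIG_FOO_MODULE to 1
 * for =m; both are undefined when the option is off. */
#define CONFIG_DEMO_BUILTIN 1		/* pretend =y */
#define CONFIG_DEMO_MOD_MODULE 1	/* pretend =m */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option) __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
	/* Expected output: 1/0, 1/1, 0/0. */
	printf("builtin: IS_ENABLED=%d IS_MODULE=%d\n",
	       IS_ENABLED(CONFIG_DEMO_BUILTIN), IS_MODULE(CONFIG_DEMO_BUILTIN));
	printf("module:  IS_ENABLED=%d IS_MODULE=%d\n",
	       IS_ENABLED(CONFIG_DEMO_MOD), IS_MODULE(CONFIG_DEMO_MOD));
	printf("off:     IS_ENABLED=%d IS_MODULE=%d\n",
	       IS_ENABLED(CONFIG_DEMO_OFF), IS_MODULE(CONFIG_DEMO_OFF));
	return 0;
}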


@@ -115,7 +115,7 @@ static void hv_stimer0_isr(void)
	hv_cpu = this_cpu_ptr(hv_context.cpu_context);
	hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt);
-	add_interrupt_randomness(stimer0_vector, 0);
+	add_interrupt_randomness(stimer0_vector);
 }

 static int hv_ce_set_next_event(unsigned long delta,


@@ -1146,7 +1146,7 @@ static void vmbus_isr(void)
		tasklet_schedule(&hv_cpu->msg_dpc);
	}

-	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
+	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);
 }

 /*


@@ -563,7 +563,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
	res = platform_device_add(data->pdev);
	if (res)
-		goto ipmi_err;
+		goto dev_add_err;

	platform_set_drvdata(data->pdev, data);
@@ -611,7 +611,9 @@ hwmon_reg_err:
	ipmi_destroy_user(data->ipmi.user);
 ipmi_err:
	platform_set_drvdata(data->pdev, NULL);
-	platform_device_unregister(data->pdev);
+	platform_device_del(data->pdev);
+dev_add_err:
+	platform_device_put(data->pdev);
 dev_err:
	ida_simple_remove(&aem_ida, data->id);
 id_err:
@@ -703,7 +705,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
	res = platform_device_add(data->pdev);
	if (res)
-		goto ipmi_err;
+		goto dev_add_err;

	platform_set_drvdata(data->pdev, data);
@@ -751,7 +753,9 @@ hwmon_reg_err:
	ipmi_destroy_user(data->ipmi.user);
 ipmi_err:
	platform_set_drvdata(data->pdev, NULL);
-	platform_device_unregister(data->pdev);
+	platform_device_del(data->pdev);
+dev_add_err:
+	platform_device_put(data->pdev);
 dev_err:
	ida_simple_remove(&aem_ida, data->id);
 id_err:
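
The rule these four hunks enforce: platform_device_del() is only valid after a successful platform_device_add(), and platform_device_unregister() is del plus put, so calling either on a device whose add failed tears down a device that was never added. The correct unwind for a failed add is platform_device_put() alone, which drops the reference taken by platform_device_alloc(). A sketch of the pattern in a hypothetical registration path, not the driver's code:

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_register_pdev(void)
{
	struct platform_device *pdev;
	int res;

	pdev = platform_device_alloc("example", 0);
	if (!pdev)
		return -ENOMEM;

	res = platform_device_add(pdev);
	if (res) {
		/* Never added: only drop the allocation reference. */
		platform_device_put(pdev);
		return res;
	}

	/* Added successfully: later teardown may use
	 * platform_device_unregister(), i.e. del + put. */
	return 0;
}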


@@ -780,11 +780,12 @@ static int bma180_probe(struct i2c_client *client,
		data->trig->dev.parent = &client->dev;
		data->trig->ops = &bma180_trigger_ops;
		iio_trigger_set_drvdata(data->trig, indio_dev);
-		indio_dev->trig = iio_trigger_get(data->trig);

		ret = iio_trigger_register(data->trig);
		if (ret)
			goto err_trigger_free;
+
+		indio_dev->trig = iio_trigger_get(data->trig);
	}

	ret = iio_triggered_buffer_setup(indio_dev, NULL,


@@ -1486,10 +1486,14 @@ static int mma8452_reset(struct i2c_client *client)
	int i;
	int ret;

-	ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
+	/*
+	 * On the fxls8471, setting the reset bit resets the chip immediately
+	 * and the write is never ACKed, so do not check the return value.
+	 * The loop below reads the reset register back to verify that the
+	 * reset actually took effect.
+	 */
+	i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
					MMA8452_CTRL_REG2_RST);
-	if (ret < 0)
-		return ret;

	for (i = 0; i < 10; i++) {
		usleep_range(100, 200);


@@ -205,6 +205,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
		},
		.driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
	},
+	{
+		/* Nuvision Solo 10 Draw */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TMAX"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"),
+		},
+		.driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+	},
	{}
 };


@@ -421,11 +421,11 @@ static int ccs811_probe(struct i2c_client *client,
		data->drdy_trig->dev.parent = &client->dev;
		data->drdy_trig->ops = &ccs811_trigger_ops;
		iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
-		indio_dev->trig = data->drdy_trig;
-		iio_trigger_get(indio_dev->trig);
		ret = iio_trigger_register(data->drdy_trig);
		if (ret)
			goto err_poweroff;
+
+		indio_dev->trig = iio_trigger_get(data->drdy_trig);
	}

	ret = iio_triggered_buffer_setup(indio_dev, NULL,
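
This and the bma180 hunk above apply the same ordering rule: publish the trigger through indio_dev->trig, and take the extra reference, only after iio_trigger_register() succeeds; otherwise a registration failure leaves the device holding a reference to (and later exposing) a trigger that was never registered. Condensed, the corrected sequence in both drivers looks like this (probe fragment with assumed local names, not a complete function):

	/* Register first; publish and take the reference only on success. */
	ret = iio_trigger_register(trig);
	if (ret)
		goto err_trigger_free;

	indio_dev->trig = iio_trigger_get(trig);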


@@ -873,6 +873,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050)
	ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
				 MPU3050_PWR_MGM_SLEEP, 0);
	if (ret) {
+		regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
		dev_err(mpu3050->dev, "error setting power mode\n");
		return ret;
	}


@@ -198,6 +198,7 @@ static int iio_sysfs_trigger_remove(int id)
	}

	iio_trigger_unregister(t->trig);
+	irq_work_sync(&t->work);
	iio_trigger_free(t->trig);

	list_del(&t->l);


@@ -407,6 +407,7 @@ struct qedr_qp {
	u32 sq_psn;
	u32 qkey;
	u32 dest_qp_num;
+	u8 timeout;

	/* Relevant to qps created from kernel space only (ULPs) */
	u8 prev_wqe_size;


@@ -2376,6 +2376,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
					1 << max_t(int, attr->timeout - 8, 0);
		else
			qp_params.ack_timeout = 0;
+
+		qp->timeout = attr->timeout;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
@@ -2535,7 +2537,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
	rdma_ah_set_sl(&qp_attr->ah_attr, 0);
-	qp_attr->timeout = params.timeout;
+	qp_attr->timeout = qp->timeout;
	qp_attr->rnr_retry = params.rnr_retry;
	qp_attr->retry_cnt = params.retry_cnt;
	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
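
The reason the first hunk caches attr->timeout: the value handed to firmware is derived through a lossy mapping, so qedr_query_qp() cannot recover the original IB timeout exponent from the firmware parameters. A standalone illustration mirroring the max_t() expression above; every nonzero timeout of 8 or below collapses to the same firmware value:

#include <stdio.h>

/* Mirrors: qp_params.ack_timeout = 1 << max_t(int, attr->timeout - 8, 0) */
static unsigned int ib_timeout_to_fw(int timeout)
{
	int shift = timeout - 8;

	return 1u << (shift > 0 ? shift : 0);
}

int main(void)
{
	/* Timeouts 5..8 all print "1": the mapping is not invertible,
	 * hence the cached qp->timeout. */
	for (int t = 5; t <= 10; t++)
		printf("IB timeout %d -> fw ack_timeout %u\n",
		       t, ib_timeout_to_fw(t));
	return 0;
}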

Some files were not shown because too many files have changed in this diff.