Merge android-4.19.28 (34e9e65) into msm-4.19
* refs/heads/tmp-34e9e65:
  Linux 4.19.28
  bpf: fix sanitation rewrite in case of non-pointers
  scsi: core: reset host byte in DID_NEXUS_FAILURE case
  exec: Fix mem leak in kernel_read_file
  Bluetooth: Fix locking in bt_accept_enqueue() for BH context
  Bluetooth: btrtl: Restore old logic to assume firmware is already loaded
  selftests: firmware: fix verify_reqs() return value
  USB: serial: cp210x: fix GPIO in autosuspend
  gnss: sirf: fix premature wakeup interrupt enable
  xtensa: fix get_wchan
  aio: Fix locking in aio_poll()
  MIPS: irq: Allocate accurate order pages for irq stack
  applicom: Fix potential Spectre v1 vulnerabilities
  usb: xhci: Fix for Enabling USB ROLE SWITCH QUIRK on INTEL_SUNRISEPOINT_LP_XHCI
  tracing: Fix event filters and triggers to handle negative numbers
  x86/boot/compressed/64: Do not read legacy ROM on EFI system
  x86/CPU/AMD: Set the CPB bit unconditionally on F17h
  net: sched: act_tunnel_key: fix NULL pointer dereference during init
  net/sched: act_skbedit: fix refcount leak when replace fails
  net/sched: act_ipt: fix refcount leak when replace fails
  net: dsa: mv88e6xxx: prevent interrupt storm caused by mv88e6390x_port_set_cmode
  net: dsa: mv88e6xxx: power serdes on/off for 10G interfaces on 6390X
  ipv4: Pass original device to ip_rcv_finish_core
  mpls: Return error for RTA_GATEWAY attribute
  ipv6: Return error for RTA_VIA attribute
  ipv4: Return error for RTA_VIA attribute
  net: avoid use IPCB in cipso_v4_error
  net: Add __icmp_send helper.
  tun: remove unnecessary memory barrier
  xen-netback: fix occasional leak of grant ref mappings under memory pressure
  xen-netback: don't populate the hash cache on XenBus disconnect
  tun: fix blocking read
  tipc: fix race condition causing hung sendto
  net: socket: set sock->sk to NULL after calling proto_ops::release()
  net: sit: fix memory leak in sit_init_net()
  net: phy: phylink: fix uninitialized variable in phylink_get_mac_state
  net: phy: Micrel KSZ8061: link failure after cable connect
  net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails
  net: netem: fix skb length BUG_ON in __skb_to_sgvec
  netlabel: fix out-of-bounds memory accesses
  net: dsa: mv88e6xxx: Fix u64 statistics
  net: dsa: mv88e6xxx: Fix statistics on mv88e6161
  lan743x: Fix TX Stall Issue
  ipv4: Add ICMPv6 support when parse route ipproto
  hv_netvsc: Fix IP header checksum for coalesced packets
  geneve: correctly handle ipv6.disable module parameter
  bnxt_en: Drop oversize TX packets to prevent errors.
  tipc: fix RDM/DGRAM connect() regression
  team: Free BPF filter when unregistering netdev
  sky2: Disable MSI on Dell Inspiron 1545 and Gateway P-79
  sctp: call iov_iter_revert() after sending ABORT
  qmi_wwan: Add support for Quectel EG12/EM12
  net-sysfs: Fix mem leak in netdev_register_kobject
  net: sched: put back q.qlen into a single location
  net: dsa: mv8e6xxx: fix number of internal PHYs for 88E6x90 family
  net: dsa: mv88e6xxx: handle unknown duplex modes gracefully in mv88e6xxx_port_set_duplex
  ip6mr: Do not call __IP6_INC_STATS() from preemptible context
  staging: android: ashmem: Avoid range_alloc() allocation with ashmem_mutex held.
  staging: android: ashmem: Don't call fallocate() with ashmem_mutex held.
  staging: android: ion: fix sys heap pool's gfp_flags
  staging: wilc1000: fix to set correct value for 'vif_num'
  staging: comedi: ni_660x: fix missing break in switch statement
  staging: erofs: compressed_pages should not be accessed again after freed
  staging: erofs: fix illegal address access under memory pressure
  USB: serial: ftdi_sio: add ID for Hjelmslund Electronics USB485
  USB: serial: cp210x: add ID for Ingenico 3070
  USB: serial: option: add Telit ME910 ECM composition
  staging: erofs: fix mis-acted TAIL merging behavior
  cpufreq: Use struct kobj_attribute instead of struct global_attr
  ANDROID: cuttlefish: enable CONFIG_INET_UDP_DIAG=y
  ANDROID: PM / EM: Document the support for legacy (deprecated) EM
  UPSTREAM: sched/doc: Document Energy Aware Scheduling
  UPSTREAM: PM/EM: Document the Energy Model framework
  ANDROID: cuttlefish: enable CONFIG_USB_RTL8152=y
  ANDROID: cuttlefish_defconfig: Add support for AC97 audio
  ANDROID: cpufreq: times: optimize proc files
  ANDROID: cpufreq: times: record fast switch frequency transitions
  ANDROID: cpufreq: times: add /proc/uid_concurrent_{active,policy}_time
  ANDROID: cuttlefish_defconfig: Enable CONFIG_CPU_FREQ_TIMES
  ANDROID: cpufreq: Add time_in_state to /proc/uid directories
  ANDROID: proc: Add /proc/uid directory
  ANDROID: cpufreq: times: track per-uid time in state
  ANDROID: cpufreq: track per-task time in state
  ANDROID: cuttlefish: enable CONFIG_NETFILTER_XT_TARGET_CT=y
  ANDROID: overlayfs: override_creds=off option bypass creator_cred

Conflicts:
	drivers/cpufreq/cpufreq.c
	drivers/cpufreq/cpufreq_times.c
	drivers/staging/android/ion/ion_system_heap.c
	fs/proc/Kconfig
	include/linux/cpufreq_times.h

Change-Id: If347563ae5f040a43c74e1138d412738928dcc01
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Documentation/ABI/testing/procfs-concurrent_time (new file, 16 lines)
@@ -0,0 +1,16 @@
What:		/proc/uid_concurrent_active_time
Date:		December 2018
Contact:	Connor O'Brien <connoro@google.com>
Description:
	The /proc/uid_concurrent_active_time file displays aggregated cputime
	numbers for each uid, broken down by the total number of cores that were
	active while the uid's task was running.

What:		/proc/uid_concurrent_policy_time
Date:		December 2018
Contact:	Connor O'Brien <connoro@google.com>
Description:
	The /proc/uid_concurrent_policy_time file displays aggregated cputime
	numbers for each uid, broken down based on the cpufreq policy
	of the core used by the uid's task and the number of cores associated
	with that policy that were active while the uid's task was running.
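For illustration, the format produced by this patch's seq_file handlers (see
the cpufreq_times.c changes later in this commit) looks roughly like the
following on a 4-CPU system; the uids and tick values here are made up:

	cpus: 4
	1000: 131 45 12 0
	10081: 37 20 6 1

In uid_concurrent_active_time, column N of a uid's row accumulates the cputime
(in clock ticks) charged to that uid while exactly N cores were active.
uid_concurrent_policy_time has the same per-row layout, but its header lists
each cpufreq policy and its CPU count (e.g. "policy0: 2 policy2: 2"), and the
columns count active cores within the task's own policy.
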
@@ -102,6 +102,29 @@ Only the lists of names from directories are merged. Other content
such as metadata and extended attributes are reported for the upper
directory only. These attributes of the lower directory are hidden.

credentials
-----------

By default, all access to the upper, lower and work directories is performed
with the mounter's MAC and DAC credentials, as recorded at mount time.
Incoming accesses are checked against the caller's credentials.

In the case where the caller's MAC or DAC credentials do not overlap the
mounter's, a use case available in older versions of the driver, the
override_creds mount flag can be turned off. This helps when the use pattern
has a caller with legitimate credentials where the mounter does not. Several
unintended side effects will occur, though. A caller without certain key
capabilities, or with lower privilege, will not always be able to delete
files or directories, create nodes, or search some restricted directories.
The ability to search and read a directory entry can be spotty because the
cache mechanism does not retest credentials; the assumption is that a
privileged caller can fill the cache, after which a less privileged caller
can read the directory cache. The uneven security model, where cache,
upperdir and workdir are opened at privilege but accessed without creating a
form of privilege escalation, should only be used with a strict understanding
of the side effects and of the security policies.
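As an illustration (the option name is as introduced by this change; the
paths are arbitrary), a mount that turns the mounter's override credentials
off would look like:

  mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,workdir=/work,override_creds=off /merged
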

whiteouts and opaque directories
--------------------------------
Documentation/power/energy-model.txt (new file, 169 lines)
@@ -0,0 +1,169 @@
====================
Energy Model of CPUs
====================

1. Overview
-----------

The Energy Model (EM) framework serves as an interface between drivers knowing
the power consumed by CPUs at various performance levels, and the kernel
subsystems willing to use that information to make energy-aware decisions.

The source of the information about the power consumed by CPUs can vary greatly
from one platform to another. These power costs can be estimated using
devicetree data in some cases. In others, the firmware will know better.
Alternatively, userspace might be best positioned. And so on. To avoid having
each and every client subsystem re-implement support for each and every
possible source of information on its own, the EM framework acts as an
abstraction layer which standardizes the format of power cost tables in the
kernel, thus avoiding redundant work.

The figure below depicts an example of drivers (Arm-specific here, but the
approach is applicable to any architecture) providing power costs to the EM
framework, and interested clients reading the data from it.

       +---------------+  +-----------------+  +---------------+
       | Thermal (IPA) |  | Scheduler (EAS) |  |     Other     |
       +---------------+  +-----------------+  +---------------+
               |                   | em_pd_energy()    |
               |                   | em_cpu_get()      |
               +---------+         |         +---------+
                         |         |         |
                         v         v         v
                        +---------------------+
                        |    Energy Model     |
                        |     Framework       |
                        +---------------------+
                           ^       ^       ^
                           |       |       | em_register_perf_domain()
                +----------+       |       +---------+
                |                  |                 |
        +---------------+  +---------------+  +--------------+
        |  cpufreq-dt   |  |   arm_scmi    |  |    Other     |
        +---------------+  +---------------+  +--------------+
                ^                  ^                 ^
                |                  |                 |
        +--------------+  +---------------+  +--------------+
        | Device Tree  |  |   Firmware    |  |      ?       |
        +--------------+  +---------------+  +--------------+

The EM framework manages power cost tables per 'performance domain' in the
system. A performance domain is a group of CPUs whose performance is scaled
together. Performance domains generally have a 1-to-1 mapping with CPUFreq
policies. All CPUs in a performance domain are required to have the same
micro-architecture. CPUs in different performance domains can have different
micro-architectures.


2. Core APIs
------------

2.1 Config options

CONFIG_ENERGY_MODEL must be enabled to use the EM framework.


2.2 Registration of performance domains

Drivers are expected to register performance domains into the EM framework by
calling the following API:

  int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
			      struct em_data_callback *cb);

Drivers must specify the CPUs of the performance domains using the cpumask
argument, and provide a callback function returning <frequency, power> tuples
for each capacity state. The callback function provided by the driver is free
to fetch data from any relevant location (DT, firmware, ...), and by any means
deemed necessary. See Section 3. for an example of a driver implementing this
callback, and kernel/power/energy_model.c for further documentation on this
API.
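For reference, the callback type referred to above is a one-member struct; the
sketch below reproduces its shape as found in include/linux/energy_model.h at
the time of this kernel (if in doubt, the header is authoritative):

  struct em_data_callback {
	/*
	 * active_power() - fills *power (mW) for the performance state of
	 * 'cpu' whose frequency is the ceiling of *freq (kHz), and rounds
	 * *freq up to that state. Returns 0 on success.
	 */
	int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
  };
  #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
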

2.3 Accessing performance domains

Subsystems interested in the energy model of a CPU can retrieve it using the
em_cpu_get() API. The energy model tables are allocated once upon creation of
the performance domains, and kept in memory untouched.

The energy consumed by a performance domain can be estimated using the
em_pd_energy() API. The estimation is performed assuming that the schedutil
CPUfreq governor is in use.

More details about the above APIs can be found in include/linux/energy_model.h.
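As a client-side counterpart to the driver example in Section 3, a minimal
(hypothetical) subsystem could fetch a domain and query its energy as sketched
below; estimate_cpu_energy() is not a kernel function, only an illustration of
the two APIs named above, assuming their signatures from energy_model.h:

  #include <linux/energy_model.h>

  static unsigned long estimate_cpu_energy(int cpu, unsigned long max_util,
					   unsigned long sum_util)
  {
	/* Performance domain covering 'cpu', or NULL if no EM is registered */
	struct em_perf_domain *pd = em_cpu_get(cpu);

	if (!pd)
		return 0;

	/* Expected energy of the domain for this utilization landscape */
	return em_pd_energy(pd, max_util, sum_util);
  }
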

3. Example driver
-----------------

This section provides a simple example of a CPUFreq driver registering a
performance domain in the Energy Model framework using the (fake) 'foo'
protocol. The driver implements an est_power() function to be provided to the
EM framework.

 -> drivers/cpufreq/foo_cpufreq.c

	static int est_power(unsigned long *mW, unsigned long *KHz, int cpu)
	{
		long freq, power;

		/* Use the 'foo' protocol to ceil the frequency */
		freq = foo_get_freq_ceil(cpu, *KHz);
		if (freq < 0)
			return freq;

		/* Estimate the power cost for the CPU at the relevant freq. */
		power = foo_estimate_power(cpu, freq);
		if (power < 0)
			return power;

		/* Return the values to the EM framework */
		*mW = power;
		*KHz = freq;

		return 0;
	}

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		struct em_data_callback em_cb = EM_DATA_CB(est_power);
		int nr_opp, ret;

		/* Do the actual CPUFreq init work ... */
		ret = do_foo_cpufreq_init(policy);
		if (ret)
			return ret;

		/* Find the number of OPPs for this policy */
		nr_opp = foo_get_nr_opp(policy);

		/* And register the new performance domain */
		em_register_perf_domain(policy->cpus, nr_opp, &em_cb);

		return 0;
	}


4. Support for legacy Energy Models (DEPRECATED)
------------------------------------------------

The Android kernel version 4.14 and before used a different type of EM for EAS,
referred to as the 'legacy' EM. The legacy EM relies on the out-of-tree
'sched-energy-costs' devicetree bindings to provide the kernel with power costs.
The usage of such bindings in Android has now been DEPRECATED in favour of the
mainline equivalents.

The currently supported alternatives to populate the EM include:
 - using a firmware-based solution such as Arm SCMI (supported in
   drivers/cpufreq/scmi-cpufreq.c);
 - using the 'dynamic-power-coefficient' devicetree binding together with
   PM_OPP. See the of_dev_pm_opp_get_cpu_power() helper in PM_OPP, and the
   reference implementation in drivers/cpufreq/cpufreq-dt.c.

In order to ease the transition to the new EM format, Android 4.19 also provides
a compatibility driver able to load a legacy EM from DT into the EM framework.
*** Please note that THIS FEATURE WILL NOT BE AVAILABLE in future Android
kernels, and as such it must be considered only as a temporary workaround. ***

If you know what you're doing and still want to use this driver, you need to set
CONFIG_LEGACY_ENERGY_MODEL_DT=y in your kernel configuration to enable it.
Documentation/scheduler/sched-energy.txt (new file, 425 lines)
@@ -0,0 +1,425 @@
=======================
Energy Aware Scheduling
=======================

1. Introduction
---------------

Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
the impact of its decisions on the energy consumed by CPUs. EAS relies on an
Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
with a minimal impact on throughput. This document aims at providing an
introduction on how EAS works, what the main design decisions behind it are,
and what is needed to get it to run.

Before going any further, please note that at the time of writing:

   /!\ EAS does not support platforms with symmetric CPU topologies /!\

EAS operates only on heterogeneous CPU topologies (such as Arm big.LITTLE)
because this is where the potential for saving energy through scheduling is
the highest.

The actual EM used by EAS is _not_ maintained by the scheduler, but by a
dedicated framework. For details about this framework and what it provides,
please refer to its documentation (see Documentation/power/energy-model.txt).


2. Background and Terminology
-----------------------------

To make it clear from the start:
 - energy = [joule] (resource like a battery on powered devices)
 - power = energy/time = [joule/second] = [watt]

The goal of EAS is to minimize energy, while still getting the job done. That
is, we want to maximize:

	performance [inst/s]
	--------------------
	    power [W]

which is equivalent to minimizing:

	  energy [J]
	-----------
	instruction

while still getting 'good' performance. It is essentially an alternative
optimization objective to the current performance-only objective for the
scheduler. This alternative considers two objectives: energy-efficiency and
performance.

The idea behind introducing an EM is to allow the scheduler to evaluate the
implications of its decisions rather than blindly applying energy-saving
techniques that may have positive effects only on some platforms. At the same
time, the EM must be as simple as possible to minimize the scheduler latency
impact.

In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
for the scheduler to decide where a task should run (during wake-up), the EM
is used to break the tie between several good CPU candidates and pick the one
that is predicted to yield the best energy consumption without harming the
system's throughput. The predictions made by EAS rely on specific elements of
knowledge about the platform's topology, which include the 'capacity' of CPUs,
and their respective energy costs.


3. Topology information
-----------------------

EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
differentiate CPUs with different computing throughput. The 'capacity' of a CPU
represents the amount of work it can absorb when running at its highest
frequency compared to the most capable CPU of the system. Capacity values are
normalized in a 1024 range, and are comparable with the utilization signals of
tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
to capacity and utilization values, EAS is able to estimate how big/busy a
task/CPU is, and to take this into consideration when evaluating performance vs
energy trade-offs. The capacity of CPUs is provided via arch-specific code
through the arch_scale_cpu_capacity() callback.

The rest of platform knowledge used by EAS is directly read from the Energy
Model (EM) framework. The EM of a platform is composed of a power cost table
per 'performance domain' in the system (see Documentation/power/energy-model.txt
for further details about performance domains).

The scheduler manages references to the EM objects in the topology code when the
scheduling domains are built, or re-built. For each root domain (rd), the
scheduler maintains a singly linked list of all performance domains intersecting
the current rd->span. Each node in the list contains a pointer to a struct
em_perf_domain as provided by the EM framework.

The lists are attached to the root domains in order to cope with exclusive
cpuset configurations. Since the boundaries of exclusive cpusets do not
necessarily match those of performance domains, the lists of different root
domains can contain duplicate elements.

Example 1.
    Let us consider a platform with 12 CPUs, split in 3 performance domains
    (pd0, pd4 and pd8), organized as follows:

	CPUs:   0 1 2 3 4 5 6 7 8 9 10 11
	PDs:   |--pd0--|--pd4--|---pd8---|
	RDs:   |----rd1----|-----rd2-----|

    Now, consider that userspace decided to split the system with two
    exclusive cpusets, hence creating two independent root domains, each
    containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
    above figure. Since pd4 intersects with both rd1 and rd2, it will be
    present in the linked list '->pd' attached to each of them:
       * rd1->pd: pd0 -> pd4
       * rd2->pd: pd4 -> pd8

    Please note that the scheduler will create two duplicate list nodes for
    pd4 (one for each list). However, both just hold a pointer to the same
    shared data structure of the EM framework.

Since the access to these lists can happen concurrently with hotplug and other
things, they are protected by RCU, like the rest of topology structures
manipulated by the scheduler.

EAS also maintains a static key (sched_energy_present) which is enabled when at
least one root domain meets all conditions for EAS to start. Those conditions
are summarized in Section 6.


4. Energy-Aware task placement
------------------------------

EAS overrides the CFS task wake-up balancing code. It uses the EM of the
platform and the PELT signals to choose an energy-efficient target CPU during
wake-up balance. When EAS is enabled, select_task_rq_fair() calls
find_energy_efficient_cpu() to do the placement decision. This function looks
for the CPU with the highest spare capacity (CPU capacity - CPU utilization) in
each performance domain since it is the one which will allow us to keep the
frequency the lowest. Then, the function checks if placing the task there could
save energy compared to leaving it on prev_cpu, i.e. the CPU where the task ran
in its previous activation.

find_energy_efficient_cpu() uses compute_energy() to estimate what will be the
energy consumed by the system if the waking task was migrated. compute_energy()
looks at the current utilization landscape of the CPUs and adjusts it to
'simulate' the task migration. The EM framework provides the em_pd_energy() API
which computes the expected energy consumption of each performance domain for
the given utilization landscape.
|
||||
|
||||
Example 2.
|
||||
Let us consider a (fake) platform with 2 independent performance domains
|
||||
composed of two CPUs each. CPU0 and CPU1 are little CPUs; CPU2 and CPU3
|
||||
are big.
|
||||
|
||||
The scheduler must decide where to place a task P whose util_avg = 200
|
||||
and prev_cpu = 0.
|
||||
|
||||
The current utilization landscape of the CPUs is depicted on the graph
|
||||
below. CPUs 0-3 have a util_avg of 400, 100, 600 and 500 respectively
|
||||
Each performance domain has three Operating Performance Points (OPPs).
|
||||
The CPU capacity and power cost associated with each OPP is listed in
|
||||
the Energy Model table. The util_avg of P is shown on the figures
|
||||
below as 'PP'.
|
||||
|
||||
CPU util.
|
||||
1024 - - - - - - - Energy Model
|
||||
+-----------+-------------+
|
||||
| Little | Big |
|
||||
768 ============= +-----+-----+------+------+
|
||||
| Cap | Pwr | Cap | Pwr |
|
||||
+-----+-----+------+------+
|
||||
512 =========== - ##- - - - - | 170 | 50 | 512 | 400 |
|
||||
## ## | 341 | 150 | 768 | 800 |
|
||||
341 -PP - - - - ## ## | 512 | 300 | 1024 | 1700 |
|
||||
PP ## ## +-----+-----+------+------+
|
||||
170 -## - - - - ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
Current OPP: ===== Other OPP: - - - util_avg (100 each): ##
|
||||
|
||||
|
||||
find_energy_efficient_cpu() will first look for the CPUs with the
|
||||
maximum spare capacity in the two performance domains. In this example,
|
||||
CPU1 and CPU3. Then it will estimate the energy of the system if P was
|
||||
placed on either of them, and check if that would save some energy
|
||||
compared to leaving P on CPU0. EAS assumes that OPPs follow utilization
|
||||
(which is coherent with the behaviour of the schedutil CPUFreq
|
||||
governor, see Section 6. for more details on this topic).
|
||||
|
||||
Case 1. P is migrated to CPU1
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1024 - - - - - - -
|
||||
|
||||
Energy calculation:
|
||||
768 ============= * CPU0: 200 / 341 * 150 = 88
|
||||
* CPU1: 300 / 341 * 150 = 131
|
||||
* CPU2: 600 / 768 * 800 = 625
|
||||
512 - - - - - - - ##- - - - - * CPU3: 500 / 768 * 800 = 520
|
||||
## ## => total_energy = 1364
|
||||
341 =========== ## ##
|
||||
PP ## ##
|
||||
170 -## - - PP- ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
|
||||
Case 2. P is migrated to CPU3
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1024 - - - - - - -
|
||||
|
||||
Energy calculation:
|
||||
768 ============= * CPU0: 200 / 341 * 150 = 88
|
||||
* CPU1: 100 / 341 * 150 = 43
|
||||
PP * CPU2: 600 / 768 * 800 = 625
|
||||
512 - - - - - - - ##- - -PP - * CPU3: 700 / 768 * 800 = 729
|
||||
## ## => total_energy = 1485
|
||||
341 =========== ## ##
|
||||
## ##
|
||||
170 -## - - - - ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
|
||||
Case 3. P stays on prev_cpu / CPU 0
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1024 - - - - - - -
|
||||
|
||||
Energy calculation:
|
||||
768 ============= * CPU0: 400 / 512 * 300 = 234
|
||||
* CPU1: 100 / 512 * 300 = 58
|
||||
* CPU2: 600 / 768 * 800 = 625
|
||||
512 =========== - ##- - - - - * CPU3: 500 / 768 * 800 = 520
|
||||
## ## => total_energy = 1437
|
||||
341 -PP - - - - ## ##
|
||||
PP ## ##
|
||||
170 -## - - - - ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
|
||||
From these calculations, the Case 1 has the lowest total energy. So CPU 1
|
||||
is be the best candidate from an energy-efficiency standpoint.
|
||||
|
||||
Big CPUs are generally more power hungry than the little ones and are thus used
mainly when a task doesn't fit the littles. However, little CPUs aren't always
necessarily more energy-efficient than big CPUs. For some systems, the high OPPs
of the little CPUs can be less energy-efficient than the lowest OPPs of the
bigs, for example. So, if the little CPUs happen to have enough utilization at
a specific point in time, a small task waking up at that moment could be better
off executing on the big side in order to save energy, even though it would fit
on the little side.

And even in the case where all OPPs of the big CPUs are less energy-efficient
than those of the little, using the big CPUs for a small task might still, under
specific conditions, save energy. Indeed, placing a task on a little CPU can
result in raising the OPP of the entire performance domain, and that will
increase the cost of the tasks already running there. If the waking task is
placed on a big CPU, its own execution cost might be higher than if it was
running on a little, but it won't impact the other tasks of the little CPUs
which will keep running at a lower OPP. So, when considering the total energy
consumed by CPUs, the extra cost of running that one task on a big core can be
smaller than the cost of raising the OPP on the little CPUs for all the other
tasks.

The examples above would be nearly impossible to get right in a generic way, and
for all platforms, without knowing the cost of running at different OPPs on all
CPUs of the system. Thanks to its EM-based design, EAS should cope with them
correctly without too many troubles. However, in order to ensure a minimal
impact on throughput for high-utilization scenarios, EAS also implements another
mechanism called 'over-utilization'.


5. Over-utilization
-------------------

From a general standpoint, the use-cases where EAS can help the most are those
involving a light/medium CPU utilization. Whenever long CPU-bound tasks are
being run, they will require all of the available CPU capacity, and there isn't
much that can be done by the scheduler to save energy without severely harming
throughput. In order to avoid hurting performance with EAS, CPUs are flagged as
'over-utilized' as soon as they are used at more than 80% of their compute
capacity. As long as no CPUs are over-utilized in a root domain, load balancing
is disabled and EAS overrides the wake-up balancing code. EAS is likely to load
the most energy efficient CPUs of the system more than the others if that can be
done without harming throughput. So, the load-balancer is disabled to prevent
it from breaking the energy-efficient task placement found by EAS. It is safe to
do so when the system isn't overutilized since being below the 80% tipping point
implies that:

    a. there is some idle time on all CPUs, so the utilization signals used by
       EAS are likely to accurately represent the 'size' of the various tasks
       in the system;
    b. all tasks should already be provided with enough CPU capacity,
       regardless of their nice values;
    c. since there is spare capacity all tasks must be blocking/sleeping
       regularly and balancing at wake-up is sufficient.

As soon as one CPU goes above the 80% tipping point, at least one of the three
assumptions above becomes incorrect. In this scenario, the 'overutilized' flag
is raised for the entire root domain, EAS is disabled, and the load-balancer is
re-enabled. By doing so, the scheduler falls back onto load-based algorithms for
wake-up and load balance under CPU-bound conditions. This better respects the
nice values of tasks.

Since the notion of overutilization largely relies on detecting whether or not
there is some idle time in the system, the CPU capacity 'stolen' by higher
(than CFS) scheduling classes (as well as IRQ) must be taken into account. As
such, the detection of overutilization accounts for the capacity used not only
by CFS tasks, but also by the other scheduling classes and IRQ.


6. Dependencies and requirements for EAS
----------------------------------------

Energy Aware Scheduling depends on the CPUs of the system having specific
hardware properties and on other features of the kernel being enabled. This
section lists these dependencies and provides hints as to how they can be met.


6.1 - Asymmetric CPU topology

As mentioned in the introduction, EAS is only supported on platforms with
asymmetric CPU topologies for now. This requirement is checked at run-time by
looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling
domains are built.

The flag is set/cleared automatically by the scheduler topology code whenever
there are CPUs with different capacities in a root domain. The capacities of
CPUs are provided by arch-specific code through the arch_scale_cpu_capacity()
callback. As an example, arm and arm64 share an implementation of this callback
which uses a combination of CPUFreq data and device-tree bindings to compute the
capacity of CPUs (see drivers/base/arch_topology.c for more details).

So, in order to use EAS on your platform your architecture must implement the
arch_scale_cpu_capacity() callback, and some of the CPUs must have a lower
capacity than others.

Please note that EAS is not fundamentally incompatible with SMP, but no
significant savings on SMP platforms have been observed yet. This restriction
could be amended in the future if proven otherwise.


6.2 - Energy Model presence

EAS uses the EM of a platform to estimate the impact of scheduling decisions on
energy. So, your platform must provide power cost tables to the EM framework in
order to make EAS start. To do so, please refer to documentation of the
independent EM framework in Documentation/power/energy-model.txt.

Please also note that the scheduling domains need to be re-built after the
EM has been registered in order to start EAS.


6.3 - Energy Model complexity

The task wake-up path is very latency-sensitive. When the EM of a platform is
too complex (too many CPUs, too many performance domains, too many performance
states, ...), the cost of using it in the wake-up path can become prohibitive.
The energy-aware wake-up algorithm has a complexity of:

	C = Nd * (Nc + Ns)

with: Nd the number of performance domains; Nc the number of CPUs; and Ns the
total number of OPPs (ex: for two perf. domains with 4 OPPs each, Ns = 8).

A complexity check is performed at the root domain level, when scheduling
domains are built. EAS will not start on a root domain if its C happens to be
higher than the completely arbitrary EM_MAX_COMPLEXITY threshold (2048 at the
time of writing).
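As a worked example (hypothetical platform, not from the original text): an
8-CPU big.LITTLE system with two performance domains and 3 OPPs per domain
gives Nd = 2, Nc = 8 and Ns = 6, hence:

	C = 2 * (8 + 6) = 28

which is comfortably below the 2048 threshold, so EAS would be allowed to
start on such a root domain.
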

If you really want to use EAS but the complexity of your platform's Energy
Model is too high to be used with a single root domain, you're left with only
two possible options:

    1. split your system into separate, smaller, root domains using exclusive
       cpusets and enable EAS locally on each of them. This option has the
       benefit of working out of the box but the drawback of preventing load
       balance between root domains, which can result in an unbalanced system
       overall;
    2. submit patches to reduce the complexity of the EAS wake-up algorithm,
       hence enabling it to cope with larger EMs in reasonable time.


6.4 - Schedutil governor

EAS tries to predict at which OPP the CPUs will be running in the near future
in order to estimate their energy consumption. To do so, it is assumed that
OPPs of CPUs follow their utilization.

Although it is very difficult to provide hard guarantees regarding the accuracy
of this assumption in practice (because the hardware might not do what it is
told to do, for example), schedutil as opposed to other CPUFreq governors at
least _requests_ frequencies calculated using the utilization signals.
Consequently, the only sane governor to use together with EAS is schedutil,
because it is the only one providing some degree of consistency between
frequency requests and energy predictions.

Using EAS with any other governor than schedutil is not supported.


6.5 - Scale-invariant utilization signals

In order to make accurate prediction across CPUs and for all performance
states, EAS needs frequency-invariant and CPU-invariant PELT signals. These can
be obtained using the architecture-defined arch_scale{cpu,freq}_capacity()
callbacks.

Using EAS on a platform that doesn't implement these two callbacks is not
supported.


6.6 - Multithreading (SMT)

EAS in its current form is SMT unaware and is not able to leverage
multithreaded hardware to save energy. EAS considers threads as independent
CPUs, which can actually be counter-productive for both performance and energy.

EAS on SMT is not supported.
Makefile (2 lines changed)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 27
+SUBLEVEL = 28
 EXTRAVERSION =
 NAME = "People's Front"
@@ -58,6 +58,7 @@ CONFIG_ENERGY_MODEL=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
@@ -124,6 +125,7 @@ CONFIG_NF_CT_NETLINK=y
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -229,6 +231,7 @@ CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
 CONFIG_PPTP=y
 CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_USBNET=y
 # CONFIG_USB_NET_AX8817X is not set
 # CONFIG_USB_NET_AX88179_178A is not set
@@ -299,6 +302,12 @@ CONFIG_DRM=y
 CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
 CONFIG_HIDRAW=y
 CONFIG_UHID=y
 CONFIG_HID_A4TECH=y
@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
 void __init init_IRQ(void)
 {
 	int i;
+	unsigned int order = get_order(IRQ_STACK_SIZE);
 
 	for (i = 0; i < NR_IRQS; i++)
 		irq_set_noprobe(i);
@@ -62,8 +63,7 @@ void __init init_IRQ(void)
 	arch_init_irq();
 
 	for_each_possible_cpu(i) {
-		int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
-		void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+		void *s = (void *)__get_free_pages(GFP_KERNEL, order);
 
 		irq_stack[i] = s;
 		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
@@ -1,5 +1,7 @@
+#include <linux/efi.h>
 #include <asm/e820/types.h>
 #include <asm/processor.h>
+#include <asm/efi.h>
 #include "pgtable.h"
 #include "../string.h"
 
@@ -37,9 +39,10 @@ int cmdline_find_option_bool(const char *option);
 
 static unsigned long find_trampoline_placement(void)
 {
-	unsigned long bios_start, ebda_start;
+	unsigned long bios_start = 0, ebda_start = 0;
 	unsigned long trampoline_start;
 	struct boot_e820_entry *entry;
+	char *signature;
 	int i;
 
 	/*
@@ -47,8 +50,18 @@ static unsigned long find_trampoline_placement(void)
 	 * This code is based on reserve_bios_regions().
 	 */
 
-	ebda_start = *(unsigned short *)0x40e << 4;
-	bios_start = *(unsigned short *)0x413 << 10;
+	/*
+	 * EFI systems may not provide legacy ROM. The memory may not be mapped
+	 * at all.
+	 *
+	 * Only look for values in the legacy ROM for non-EFI system.
+	 */
+	signature = (char *)&boot_params->efi_info.efi_loader_signature;
+	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
+	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
+		ebda_start = *(unsigned short *)0x40e << 4;
+		bios_start = *(unsigned short *)0x413 << 10;
+	}
 
 	if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
 		bios_start = BIOS_START_MAX;
@@ -58,6 +58,7 @@ CONFIG_ACPI_PROCFS_POWER=y
 # CONFIG_ACPI_FAN is not set
 # CONFIG_ACPI_THERMAL is not set
 # CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_X86_ACPI_CPUFREQ=y
 CONFIG_PCI_MSI=y
@@ -96,6 +97,7 @@ CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=y
 CONFIG_INET_ESP=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_TCP_CONG_ADVANCED=y
 # CONFIG_TCP_CONG_BIC is not set
@@ -128,6 +130,7 @@ CONFIG_NF_CT_NETLINK=y
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -234,6 +237,7 @@ CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_USBNET=y
 # CONFIG_USB_NET_AX8817X is not set
 # CONFIG_USB_NET_AX88179_178A is not set
@@ -311,6 +315,12 @@ CONFIG_DRM=y
 CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
 CONFIG_HIDRAW=y
 CONFIG_UHID=y
 CONFIG_HID_A4TECH=y
@@ -818,11 +818,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_ZEN);
-	/*
-	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
-	 * all up to and including B1.
-	 */
-	if (c->x86_model <= 1 && c->x86_stepping <= 1)
+
+	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+	if (!cpu_has(c, X86_FEATURE_CPB))
 		set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
@@ -320,8 +320,8 @@ unsigned long get_wchan(struct task_struct *p)
 
 		/* Stack layout: sp-4: ra, sp-3: sp' */
 
-		pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
-		sp = *(unsigned long *)sp - 3;
+		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+		sp = SPILL_SLOT(sp, 1);
 	} while (count++ < 16);
 	return 0;
 }
@@ -544,10 +544,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
 			   hdev->bus);
 
 	if (!btrtl_dev->ic_info) {
-		rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+		rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
 			    lmp_subver, hci_rev, hci_ver);
-		ret = -EINVAL;
-		goto err_free;
+		return btrtl_dev;
 	}
 
 	if (btrtl_dev->ic_info->has_rom_version) {
@@ -602,6 +601,11 @@ int btrtl_download_firmware(struct hci_dev *hdev,
 	 * standard btusb. Once that firmware is uploaded, the subver changes
 	 * to a different value.
 	 */
+	if (!btrtl_dev->ic_info) {
+		rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
+		return 0;
+	}
+
 	switch (btrtl_dev->ic_info->lmp_subver) {
 	case RTL_ROM_LMP_8723A:
 	case RTL_ROM_LMP_3499:
@@ -32,6 +32,7 @@
 #include <linux/wait.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/nospec.h>
 
 #include <asm/io.h>
 #include <linux/uaccess.h>
@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
 	TicCard = st_loc.tic_des_from_pc;	/* tic number to send */
 	IndexCard = NumCard - 1;
 
-	if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+	if (IndexCard >= MAX_BOARD)
+		return -EINVAL;
+	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+	if (!apbs[IndexCard].RamIO)
 		return -EINVAL;
 
 #ifdef DEBUG
@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	unsigned char IndexCard;
 	void __iomem *pmem;
 	int ret = 0;
+	static int warncount = 10;
 	volatile unsigned char byte_reset_it;
 	struct st_ram_io *adgl;
 	void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	mutex_lock(&ac_mutex);
 	IndexCard = adgl->num_card-1;
 
-	if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
-		static int warncount = 10;
-		if (warncount) {
-			printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
-			warncount--;
-		}
-		kfree(adgl);
-		mutex_unlock(&ac_mutex);
-		return -EINVAL;
-	}
+	if (cmd != 6 && IndexCard >= MAX_BOARD)
+		goto err;
+	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+	if (cmd != 6 && !apbs[IndexCard].RamIO)
+		goto err;
 
 	switch (cmd) {
 
@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	kfree(adgl);
 	mutex_unlock(&ac_mutex);
 	return 0;
+
+err:
+	if (warncount) {
+		pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+			(int)IndexCard + 1);
+		warncount--;
+	}
+	kfree(adgl);
+	mutex_unlock(&ac_mutex);
+	return -EINVAL;
+
 }
 
@@ -358,7 +358,7 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 		}
 
 		cpufreq_stats_record_transition(policy, freqs->new);
-		cpufreq_times_record_transition(freqs);
+		cpufreq_times_record_transition(policy, freqs->new);
 		policy->cur = freqs->new;
 	}
 }
@@ -555,13 +555,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
 static ssize_t show_boost(struct kobject *kobj,
-			  struct attribute *attr, char *buf)
+			  struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
-			   const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
 {
 	int ret, enable;
 
@@ -1869,9 +1869,15 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
+	int ret;
+
 	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
-	return cpufreq_driver->fast_switch(policy, target_freq);
+	ret = cpufreq_driver->fast_switch(policy, target_freq);
+	if (ret)
+		cpufreq_times_record_transition(policy, ret);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
 
@ -32,11 +32,17 @@ static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
|
||||
static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
|
||||
static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
|
||||
|
||||
struct concurrent_times {
|
||||
atomic64_t active[NR_CPUS];
|
||||
atomic64_t policy[NR_CPUS];
|
||||
};
|
||||
|
||||
struct uid_entry {
|
||||
uid_t uid;
|
||||
unsigned int max_state;
|
||||
struct hlist_node hash;
|
||||
struct rcu_head rcu;
|
||||
struct concurrent_times *concurrent_times;
|
||||
u64 time_in_state[0];
|
||||
};
|
||||
|
||||
@ -87,6 +93,7 @@ static struct uid_entry *find_uid_entry_locked(uid_t uid)
|
||||
static struct uid_entry *find_or_register_uid_locked(uid_t uid)
|
||||
{
|
||||
struct uid_entry *uid_entry, *temp;
|
||||
struct concurrent_times *times;
|
||||
unsigned int max_state = READ_ONCE(next_offset);
|
||||
size_t alloc_size = sizeof(*uid_entry) + max_state *
|
||||
sizeof(uid_entry->time_in_state[0]);
|
||||
@ -115,9 +122,15 @@ static struct uid_entry *find_or_register_uid_locked(uid_t uid)
|
||||
uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
|
||||
if (!uid_entry)
|
||||
return NULL;
|
||||
times = kzalloc(sizeof(*times), GFP_ATOMIC);
|
||||
if (!times) {
|
||||
kfree(uid_entry);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
uid_entry->uid = uid;
|
||||
uid_entry->max_state = max_state;
|
||||
uid_entry->concurrent_times = times;
|
||||
|
||||
hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
|
||||
|
||||
@ -180,10 +193,12 @@ static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
|
||||
static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
||||
{
|
||||
(*pos)++;
|
||||
do {
|
||||
(*pos)++;
|
||||
|
||||
if (*pos >= HASH_SIZE(uid_hash_table))
|
||||
return NULL;
|
||||
if (*pos >= HASH_SIZE(uid_hash_table))
|
||||
return NULL;
|
||||
} while (hlist_empty(&uid_hash_table[*pos]));
|
||||
|
||||
return &uid_hash_table[*pos];
|
||||
}
|
||||
@ -207,7 +222,8 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
|
||||
if (freqs->freq_table[i] ==
|
||||
CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
seq_printf(m, " %d", freqs->freq_table[i]);
|
||||
seq_put_decimal_ull(m, " ",
|
||||
freqs->freq_table[i]);
|
||||
}
|
||||
}
|
||||
seq_putc(m, '\n');
|
||||
@ -216,13 +232,16 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
|
||||
rcu_read_lock();
|
||||
|
||||
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
|
||||
if (uid_entry->max_state)
|
||||
seq_printf(m, "%d:", uid_entry->uid);
|
||||
if (uid_entry->max_state) {
|
||||
seq_put_decimal_ull(m, "", uid_entry->uid);
|
||||
seq_putc(m, ':');
|
||||
}
|
||||
for (i = 0; i < uid_entry->max_state; ++i) {
|
||||
u64 time;
|
||||
if (freq_index_invalid(i))
|
||||
continue;
|
||||
seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t(
|
||||
uid_entry->time_in_state[i]));
|
||||
time = nsec_to_clock_t(uid_entry->time_in_state[i]);
|
||||
seq_put_decimal_ull(m, " ", time);
|
||||
}
|
||||
if (uid_entry->max_state)
|
||||
seq_putc(m, '\n');
|
||||
@ -232,6 +251,86 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int concurrent_time_seq_show(struct seq_file *m, void *v,
|
||||
atomic64_t *(*get_times)(struct concurrent_times *))
|
||||
{
|
||||
struct uid_entry *uid_entry;
|
||||
int i, num_possible_cpus = num_possible_cpus();
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
|
||||
atomic64_t *times = get_times(uid_entry->concurrent_times);
|
||||
|
||||
seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
|
||||
seq_putc(m, ':');
|
||||
|
||||
for (i = 0; i < num_possible_cpus; ++i) {
|
||||
u64 time = nsec_to_clock_t(atomic64_read(×[i]));
|
||||
|
||||
seq_put_decimal_ull(m, " ", time);
|
||||
}
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline atomic64_t *get_active_times(struct concurrent_times *times)
|
||||
{
|
||||
return times->active;
|
||||
}
|
||||
|
||||
static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
|
||||
{
|
||||
if (v == uid_hash_table) {
|
||||
seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
return concurrent_time_seq_show(m, v, get_active_times);
|
||||
}
|
||||
|
||||
static inline atomic64_t *get_policy_times(struct concurrent_times *times)
|
||||
{
|
||||
return times->policy;
|
||||
}
|
||||
|
||||
static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
|
||||
{
|
||||
int i;
|
||||
struct cpu_freqs *freqs, *last_freqs = NULL;
|
||||
|
||||
if (v == uid_hash_table) {
|
||||
int cnt = 0;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
freqs = all_freqs[i];
|
||||
if (!freqs)
|
||||
continue;
|
||||
if (freqs != last_freqs) {
|
||||
if (last_freqs) {
|
||||
seq_put_decimal_ull(m, ": ", cnt);
|
||||
seq_putc(m, ' ');
|
||||
cnt = 0;
|
||||
}
|
||||
seq_put_decimal_ull(m, "policy", i);
|
||||
|
||||
last_freqs = freqs;
|
||||
}
|
||||
cnt++;
|
||||
}
|
||||
if (last_freqs) {
|
||||
seq_put_decimal_ull(m, ": ", cnt);
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
}
|
||||
|
||||
return concurrent_time_seq_show(m, v, get_policy_times);
|
||||
}
|
||||
|
||||
void cpufreq_task_times_init(struct task_struct *p)
|
||||
{
|
||||
unsigned long flags;
|
||||
@ -326,11 +425,16 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int state;
|
||||
unsigned int active_cpu_cnt = 0;
|
||||
unsigned int policy_cpu_cnt = 0;
|
||||
unsigned int policy_first_cpu;
|
||||
struct uid_entry *uid_entry;
|
||||
struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
|
||||
struct cpufreq_policy *policy;
|
||||
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
|
||||
int cpu = 0;
|
||||
|
||||
if (!freqs || p->flags & PF_EXITING)
|
||||
if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
|
||||
return;
|
||||
|
||||
state = freqs->offset + READ_ONCE(freqs->last_index);

@@ -346,6 +450,42 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
	if (uid_entry && state < uid_entry->max_state)
		uid_entry->time_in_state[state] += cputime;
	spin_unlock_irqrestore(&uid_lock, flags);
+
+	rcu_read_lock();
+	uid_entry = find_uid_entry_rcu(uid);
+	if (!uid_entry) {
+		rcu_read_unlock();
+		return;
+	}
+
+	for_each_possible_cpu(cpu)
+		if (!idle_cpu(cpu))
+			++active_cpu_cnt;
+
+	atomic64_add(cputime,
+		     &uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
+
+	policy = cpufreq_cpu_get(task_cpu(p));
+	if (!policy) {
+		/*
+		 * This CPU may have just come up and not have a cpufreq policy
+		 * yet.
+		 */
+		rcu_read_unlock();
+		return;
+	}
+
+	for_each_cpu(cpu, policy->related_cpus)
+		if (!idle_cpu(cpu))
+			++policy_cpu_cnt;
+
+	policy_first_cpu = cpumask_first(policy->related_cpus);
+	cpufreq_cpu_put(policy);
+
+	atomic64_add(cputime,
+		     &uid_entry->concurrent_times->policy[policy_first_cpu +
+							  policy_cpu_cnt - 1]);
+	rcu_read_unlock();
}

void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -387,6 +527,14 @@ void cpufreq_times_create_policy(struct cpufreq_policy *policy)
		all_freqs[cpu] = freqs;
}

+static void uid_entry_reclaim(struct rcu_head *rcu)
+{
+	struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
+
+	kfree(uid_entry->concurrent_times);
+	kfree(uid_entry);
+}
+
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
{
	struct uid_entry *uid_entry;
@@ -400,7 +548,7 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
			   hash, uid_start) {
		if (uid_start == uid_entry->uid) {
			hash_del_rcu(&uid_entry->hash);
-			kfree_rcu(uid_entry, rcu);
+			call_rcu(&uid_entry->rcu, uid_entry_reclaim);
		}
	}
}
@@ -408,24 +556,17 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
	spin_unlock_irqrestore(&uid_lock, flags);
}

-void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+				     unsigned int new_freq)
{
	int index;
-	struct cpu_freqs *freqs = all_freqs[freq->cpu];
-	struct cpufreq_policy *policy;
-
+	struct cpu_freqs *freqs = all_freqs[policy->cpu];
	if (!freqs)
		return;

-	policy = cpufreq_cpu_get(freq->cpu);
-	if (!policy)
-		return;
-
-	index = cpufreq_frequency_table_get_index(policy, freq->new);
+	index = cpufreq_frequency_table_get_index(policy, new_freq);
	if (index >= 0)
		WRITE_ONCE(freqs->last_index, index);
-
-	cpufreq_cpu_put(policy);
}

static const struct seq_operations uid_time_in_state_seq_ops = {
@@ -453,11 +594,55 @@ static const struct file_operations uid_time_in_state_fops = {
	.release = seq_release,
};

+static const struct seq_operations concurrent_active_time_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = concurrent_active_time_seq_show,
+};
+
+static int concurrent_active_time_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &concurrent_active_time_seq_ops);
+}
+
+static const struct file_operations concurrent_active_time_fops = {
+	.open = concurrent_active_time_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static const struct seq_operations concurrent_policy_time_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = concurrent_policy_time_seq_show,
+};
+
+static int concurrent_policy_time_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &concurrent_policy_time_seq_ops);
+}
+
+static const struct file_operations concurrent_policy_time_fops = {
+	.open = concurrent_policy_time_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
static int __init cpufreq_times_init(void)
{
	proc_create_data("uid_time_in_state", 0444, NULL,
			 &uid_time_in_state_fops, NULL);

+	proc_create_data("uid_concurrent_active_time", 0444, NULL,
+			 &concurrent_active_time_fops, NULL);
+
+	proc_create_data("uid_concurrent_policy_time", 0444, NULL,
+			 &concurrent_policy_time_fops, NULL);
+
	return 0;
}
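[Editor's note: the kfree_rcu() -> call_rcu() switch above is the core of the leak fix, so here is a minimal sketch of the pattern, not the driver code itself. The struct and callback names below are hypothetical; only the kernel APIs (call_rcu, container_of, kfree) are real.]

	/*
	 * kfree_rcu() can only free the one allocation it is handed, so an
	 * object that owns a second, separately allocated buffer needs an
	 * explicit callback that frees both after the grace period ends.
	 */
	struct entry_like {
		void *concurrent_times;		/* separately allocated; would leak under kfree_rcu() */
		struct rcu_head rcu;
	};

	static void entry_reclaim(struct rcu_head *rcu)	/* hypothetical name */
	{
		struct entry_like *e = container_of(rcu, struct entry_like, rcu);

		kfree(e->concurrent_times);	/* free the nested buffer first */
		kfree(e);			/* then the object itself */
	}

	/* caller, after unlinking: call_rcu(&e->rcu, entry_reclaim); */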
@@ -833,7 +833,7 @@ static void intel_pstate_update_policies(void)
/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
-	(struct kobject *kobj, struct attribute *attr, char *buf)	\
+	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", global.object);		\
	}
@@ -842,7 +842,7 @@ static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
-			   struct attribute *attr, char *buf)
+			   struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

@@ -853,7 +853,7 @@ static ssize_t show_status(struct kobject *kobj,
	return ret;
}

-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
@@ -867,7 +867,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
}

static ssize_t show_turbo_pct(struct kobject *kobj,
-			      struct attribute *attr, char *buf)
+			      struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
@@ -893,7 +893,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
}

static ssize_t show_num_pstates(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;
@@ -914,7 +914,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
}

static ssize_t show_no_turbo(struct kobject *kobj,
-			     struct attribute *attr, char *buf)
+			     struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

@@ -936,7 +936,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
	return ret;
}

-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
@@ -983,7 +983,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
	return count;
}

-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
@@ -1013,7 +1013,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
	return count;
}

-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
@@ -1045,12 +1045,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
}

static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
+				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hwp_boost);
}

-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+				       struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
@@ -310,30 +310,26 @@ static int sirf_probe(struct serdev_device *serdev)
			ret = -ENODEV;
			goto err_put_device;
		}
+
+		ret = regulator_enable(data->vcc);
+		if (ret)
+			goto err_put_device;
+
+		/* Wait for chip to boot into hibernate mode. */
+		msleep(SIRF_BOOT_DELAY);
	}

	if (data->wakeup) {
		ret = gpiod_to_irq(data->wakeup);
		if (ret < 0)
-			goto err_put_device;
-
+			goto err_disable_vcc;
		data->irq = ret;

-		ret = devm_request_threaded_irq(dev, data->irq, NULL,
-				sirf_wakeup_handler,
+		ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				"wakeup", data);
		if (ret)
-			goto err_put_device;
-	}
-
-	if (data->on_off) {
-		ret = regulator_enable(data->vcc);
-		if (ret)
-			goto err_put_device;
-
-		/* Wait for chip to boot into hibernate mode */
-		msleep(SIRF_BOOT_DELAY);
+			goto err_disable_vcc;
	}

	if (IS_ENABLED(CONFIG_PM)) {
@@ -342,7 +338,7 @@ static int sirf_probe(struct serdev_device *serdev)
	} else {
		ret = sirf_runtime_resume(dev);
		if (ret < 0)
-			goto err_disable_vcc;
+			goto err_free_irq;
	}

	ret = gnss_register_device(gdev);
@@ -356,6 +352,9 @@ err_disable_rpm:
		pm_runtime_disable(dev);
	else
		sirf_runtime_suspend(dev);
+err_free_irq:
+	if (data->wakeup)
+		free_irq(data->irq, data);
err_disable_vcc:
	if (data->on_off)
		regulator_disable(data->vcc);
@@ -376,6 +375,9 @@ static void sirf_remove(struct serdev_device *serdev)
	else
		sirf_runtime_suspend(&serdev->dev);

+	if (data->wakeup)
+		free_irq(data->irq, data);
+
	if (data->on_off)
		regulator_disable(data->vcc);
@@ -884,7 +884,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
	default:
		return U64_MAX;
	}
-	value = (((u64)high) << 16) | low;
+	value = (((u64)high) << 32) | low;
	return value;
}

@@ -3070,7 +3070,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
	.port_link_state = mv88e6352_port_link_state,
	.port_get_cmode = mv88e6185_port_get_cmode,
-	.stats_snapshot = mv88e6320_g1_stats_snapshot,
+	.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
	.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
	.stats_get_sset_count = mv88e6095_stats_get_sset_count,
	.stats_get_strings = mv88e6095_stats_get_strings,
@@ -4188,7 +4188,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6190",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,
@@ -4211,7 +4211,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6190X",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,
@@ -4234,7 +4234,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6191",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.max_vid = 8191,
		.port_base_addr = 0x0,
		.phy_base_addr = 0x0,
@@ -4281,7 +4281,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6290",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,
@@ -4443,7 +4443,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6390",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,
@@ -4466,7 +4466,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6390X",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,
@@ -4561,6 +4561,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
	return 0;
}

+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+	int i;
+
+	for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+		chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
							int port)
{
@@ -4597,6 +4605,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
	if (err)
		goto free;

+	mv88e6xxx_ports_cmode_init(chip);
+
	mutex_lock(&chip->reg_lock);
	err = mv88e6xxx_switch_reset(chip);
	mutex_unlock(&chip->reg_lock);
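[Editor's note: a worked micro-example of the u64 statistics fix above ("net: dsa: mv88e6xxx: Fix u64 statistics"); the variable values are made up for illustration.]

	/* Two 32-bit counter halves must be joined with a 32-bit shift. */
	u32 low = 0x22222222, high = 0x11111111;
	u64 ok  = ((u64)high << 32) | low;	/* 0x1111111122222222 — correct */
	u64 bug = ((u64)high << 16) | low;	/* 0x0000111133332222 — the old '<< 16'
						   overlaps the high word with the low
						   word's upper bits and truncates it */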
@@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
		/* normal duplex detection */
		break;
	default:
-		return -EINVAL;
+		return -EOPNOTSUPP;
	}

	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
@@ -374,6 +374,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
		cmode = 0;
	}

+	/* cmode doesn't change, nothing to do for us */
+	if (cmode == chip->ports[port].cmode)
+		return 0;
+
	lane = mv88e6390x_serdes_get_lane(chip, port);
	if (lane < 0)
		return lane;
@@ -384,7 +388,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
			return err;
	}

-	err = mv88e6390_serdes_power(chip, port, false);
+	err = mv88e6390x_serdes_power(chip, port, false);
	if (err)
		return err;

@@ -400,7 +404,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
	if (err)
		return err;

-	err = mv88e6390_serdes_power(chip, port, true);
+	err = mv88e6390x_serdes_power(chip, port, true);
	if (err)
		return err;
@@ -52,6 +52,7 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X	0x0005
#define MV88E6185_PORT_STS_CMODE_PHY		0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED	0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID	0xff

/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL	0x01
@@ -463,6 +463,12 @@ normal_tx:
	}

	length >>= 9;
+	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
+				     skb->len);
+		i = 0;
+		goto tx_dma_error;
+	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
@@ -46,6 +46,7 @@
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/dmi.h>

#include <asm/irq.h>

@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

-static int disable_msi = 0;
+static int disable_msi = -1;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

@@ -4931,6 +4932,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
	return buf;
}

+static const struct dmi_system_id msi_blacklist[] = {
+	{
+		.ident = "Dell Inspiron 1545",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+		},
+	},
+	{
+		.ident = "Gateway P-79",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+		},
+	},
+	{}
+};
+
static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
@@ -5042,6 +5061,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
		goto err_out_free_pci;
	}

+	if (disable_msi == -1)
+		disable_msi = !!dmi_check_system(msi_blacklist);
+
	if (!disable_msi && pci_enable_msi(pdev) == 0) {
		err = sky2_test_msi(hw);
		if (err) {
@@ -1403,7 +1403,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
}

static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
-				     unsigned int frame_length)
+				     unsigned int frame_length,
+				     int nr_frags)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
@@ -1413,6 +1414,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+	if (nr_frags <= 0) {
+		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	}
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = tx->frame_data0;

@@ -1517,8 +1522,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
	u32 tx_tail_flags = 0;

	/* wrap up previous descriptor */
-	tx->frame_data0 |= TX_DESC_DATA0_LS_;
-	tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+	    TX_DESC_DATA0_DTYPE_DATA_) {
+		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	}

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1603,7 +1611,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
	}

	if (gso)
-		lan743x_tx_frame_add_lso(tx, frame_length);
+		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);

	if (nr_frags <= 0)
		goto finish;
@@ -636,15 +636,20 @@ out:
static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
-	bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
	bool metadata = geneve->collect_md;
+	bool ipv4, ipv6;
	int ret = 0;

+	ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+	ipv4 = !ipv6 || metadata;
#if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6 || metadata)
+	if (ipv6) {
		ret = geneve_sock_add(geneve, true);
+		if (ret < 0 && ret != -EAFNOSUPPORT)
+			ipv4 = false;
+	}
#endif
-	if (!ret && (!ipv6 || metadata))
+	if (ipv4)
		ret = geneve_sock_add(geneve, false);
	if (ret < 0)
		geneve_sock_release(geneve);
@@ -743,6 +743,14 @@ void netvsc_linkstatus_callback(struct net_device *net,
	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+
+	iph->check = 0;
+	iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -766,9 +774,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

-	/*
-	 * In Linux, the IP checksum is always checked.
-	 * Do L4 checksum offload if enabled and present.
+	/* Incoming packets may have IP header checksum verified by the host.
+	 * They may not have IP header checksum computed after coalescing.
+	 * We compute it here if the flags are set, because on Linux, the IP
+	 * checksum is always checked.
	 */
+	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+	    csum_info->receive.ip_checksum_succeeded &&
+	    skb->protocol == htons(ETH_P_IP))
+		netvsc_comp_ipcsum(skb);
+
+	/* Do L4 checksum offload if enabled and present.
+	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
@@ -339,6 +339,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
	return genphy_config_aneg(phydev);
}

+static int ksz8061_config_init(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+	if (ret)
+		return ret;
+
+	return kszphy_config_init(phydev);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
				       const struct device_node *of_node,
				       u16 reg,
@@ -934,7 +945,7 @@ static struct phy_driver ksphy_driver[] = {
	.phy_id_mask	= MICREL_PHY_ID_MASK,
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
-	.config_init	= kszphy_config_init,
+	.config_init	= ksz8061_config_init,
	.ack_interrupt	= kszphy_ack_interrupt,
	.config_intr	= kszphy_config_intr,
	.suspend	= genphy_suspend,
@@ -348,6 +348,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
	linkmode_zero(state->lp_advertising);
	state->interface = pl->link_config.interface;
	state->an_enabled = pl->link_config.an_enabled;
+	state->speed = SPEED_UNKNOWN;
+	state->duplex = DUPLEX_UNKNOWN;
+	state->pause = MLO_PAUSE_NONE;
+	state->an_complete = 0;
	state->link = 1;

	return pl->ops->mac_link_state(ndev, state);
@@ -325,6 +325,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
	return 0;
}

+static void lb_bpf_func_free(struct team *team)
+{
+	struct lb_priv *lb_priv = get_lb_priv(team);
+	struct bpf_prog *fp;
+
+	if (!lb_priv->ex->orig_fprog)
+		return;
+
+	__fprog_destroy(lb_priv->ex->orig_fprog);
+	fp = rcu_dereference_protected(lb_priv->fp,
+				       lockdep_is_held(&team->lock));
+	bpf_prog_destroy(fp);
+}
+
static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
@@ -639,6 +653,7 @@ static void lb_exit(struct team *team)

	team_options_unregister(team, lb_options,
				ARRAY_SIZE(lb_options));
+	lb_bpf_func_free(team);
	cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
	free_percpu(lb_priv->pcpu_stats);
	kfree(lb_priv->ex);
@@ -2126,9 +2126,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
	}

	add_wait_queue(&tfile->wq.wait, &wait);
-	current->state = TASK_INTERRUPTIBLE;

	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
@@ -2144,7 +2144,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
		schedule();
	}

-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
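[Editor's note: the tun change above ("tun: fix blocking read") restores the canonical kernel wait-loop shape. A minimal sketch of that shape, with a hypothetical wait queue and condition; the APIs themselves (add_wait_queue, set_current_state, schedule) are real.]

	/*
	 * Setting the task state inside the loop, via set_current_state(),
	 * orders the state write against the condition check (it implies a
	 * barrier), so a wakeup that fires between the check and schedule()
	 * is not lost. A bare 'current->state = ...' outside the loop has
	 * neither the barrier nor per-iteration re-arming.
	 */
	add_wait_queue(&wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)			/* re-checked every iteration */
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &wait);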
@@ -976,6 +976,13 @@ static const struct usb_device_id products[] = {
					      0xff),
	  .driver_info	      = (unsigned long)&qmi_wwan_info_quirk_dtr,
	},
+	{ /* Quectel EG12/EM12 */
+	  USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+					USB_CLASS_VENDOR_SPEC,
+					USB_SUBCLASS_VENDOR_SPEC,
+					0xff),
+	  .driver_info	      = (unsigned long)&qmi_wwan_info_quirk_dtr,
+	},

	/* 3. Combined interface devices matching on interface number */
	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -1343,17 +1350,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
	return false;
}

-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+static bool quectel_diag_detected(struct usb_interface *intf)
{
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+	u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+	u16 id_product = le16_to_cpu(dev->descriptor.idProduct);

-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
-	    le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
-	    intf_desc.bNumEndpoints == 2)
-		return true;
-	else
-		return false;
+	if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+		return false;
+
+	if (id_product == 0x0306 || id_product == 0x0512)
+		return true;
+
+	return false;
}

static int qmi_wwan_probe(struct usb_interface *intf,
@@ -1390,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_interface *intf,
		return -ENODEV;
	}

-	/* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+	/* Several Quectel modems supports dynamic interface configuration, so
	 * we need to match on class/subclass/protocol. These values are
	 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
	 * different. Ignore the current interface if the number of endpoints
	 * equals the number for the diag interface (two).
	 */
-	if (quectel_ep06_diag_detected(intf))
+	if (quectel_diag_detected(intf))
		return -ENODEV;

	return usbnet_probe(intf, id);
@@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif)
	if (xenvif_hash_cache_size == 0)
		return;

+	BUG_ON(vif->hash.cache.count);
+
	spin_lock_init(&vif->hash.cache.lock);
	INIT_LIST_HEAD(&vif->hash.cache.list);
}
@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
+	unsigned int num_queues;
+
+	/* If queues are not set up internally - always return 0
+	 * as the packet going to be dropped anyway */
+	num_queues = READ_ONCE(vif->num_queues);
+	if (num_queues < 1)
+		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
		skb_frag_size_set(&frags[i], len);
	}

-	/* Copied all the bits from the frag list -- free it. */
-	skb_frag_list_init(skb);
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
-
	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
+			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+			xenvif_skb_zerocopy_prepare(queue, nskb);
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
				kfree_skb(skb);
				continue;
			}
+			/* Copied all the bits from the frag list -- free it. */
+			skb_frag_list_init(skb);
+			kfree_skb(nskb);
		}

		skb->dev = queue->vif->dev;
@@ -761,6 +761,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
		set_host_byte(cmd, DID_OK);
		return BLK_STS_TARGET;
	case DID_NEXUS_FAILURE:
+		set_host_byte(cmd, DID_OK);
		return BLK_STS_NEXUS;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
@@ -75,6 +75,9 @@ struct ashmem_range {
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
/*
 * long lru_count - The count of pages on our LRU list.
 *
@@ -168,19 +171,15 @@ static inline void lru_del(struct ashmem_range *range)
 * @end: The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
 */
-static int range_alloc(struct ashmem_area *asma,
-		       struct ashmem_range *prev_range, unsigned int purged,
-		       size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+			struct ashmem_range *prev_range, unsigned int purged,
+			size_t start, size_t end,
+			struct ashmem_range **new_range)
{
-	struct ashmem_range *range;
-
-	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-	if (!range)
-		return -ENOMEM;
+	struct ashmem_range *range = *new_range;

+	*new_range = NULL;
	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
@@ -190,8 +189,6 @@ static int range_alloc(struct ashmem_area *asma,

	if (range_on_lru(range))
		lru_add(range);
-
-	return 0;
}

/**
@@ -438,7 +435,6 @@ out:
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
-	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,21 +444,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	if (!mutex_trylock(&ashmem_mutex))
		return -1;

-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+	while (!list_empty(&ashmem_lru_list)) {
+		struct ashmem_range *range =
+			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct file *f = range->asma->file;

-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
+		get_file(f);
+		atomic_inc(&ashmem_shrink_inflight);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
+		mutex_unlock(&ashmem_mutex);
+		f->f_op->fallocate(f,
+				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				   start, end - start);
+		fput(f);
+		if (atomic_dec_and_test(&ashmem_shrink_inflight))
+			wake_up_all(&ashmem_shrink_wait);
+		if (!mutex_trylock(&ashmem_mutex))
+			goto out;
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
+out:
	return freed;
}

@@ -582,7 +590,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
 *
 * Caller must hold ashmem_mutex.
 */
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+		      struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;
@@ -635,7 +644,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
-				    pgend + 1, range->pgend);
+				    pgend + 1, range->pgend, new_range);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
@@ -649,7 +658,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
 *
 * Caller must hold ashmem_mutex.
 */
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+			struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;
@@ -675,7 +685,8 @@ restart:
		}
	}

-	return range_alloc(asma, range, purged, pgstart, pgend);
+	range_alloc(asma, range, purged, pgstart, pgend, new_range);
+	return 0;
}

/*
@@ -708,11 +719,19 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;
+	struct ashmem_range *range = NULL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

+	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+		if (!range)
+			return -ENOMEM;
+	}
+
	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

	if (!asma->file)
		goto out_unlock;
@@ -735,10 +754,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,

	switch (cmd) {
	case ASHMEM_PIN:
-		ret = ashmem_pin(asma, pgstart, pgend);
+		ret = ashmem_pin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_UNPIN:
-		ret = ashmem_unpin(asma, pgstart, pgend);
+		ret = ashmem_unpin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -747,6 +766,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,

out_unlock:
	mutex_unlock(&ashmem_mutex);
+	if (range)
+		kmem_cache_free(ashmem_range_cachep, range);

	return ret;
}
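[Editor's note: a minimal sketch of the unlock-around-fallocate pattern the two ashmem fixes above introduce. The lock, counter, and waitqueue names are placeholders; get_file/fput/vfs_fallocate and the wait/wake APIs are real.]

	/*
	 * fallocate() can recurse into filesystem code and back into the
	 * shrinker, so it must not run under ashmem's own mutex. The file is
	 * pinned with get_file() so dropping the lock is safe, and an
	 * in-flight counter lets pin/unpin wait out concurrent punches.
	 */
	get_file(f);				/* keep struct file alive unlocked */
	atomic_inc(&inflight);
	mutex_unlock(&lock);
	vfs_fallocate(f, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, start, len);
	fput(f);
	if (atomic_dec_and_test(&inflight))
		wake_up_all(&waitq);		/* releases wait_event() in pin/unpin */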
@@ -613,6 +613,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,
					bool cached)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct ion_page_pool *pool;
+		gfp_t gfp_flags = low_order_gfp_flags;
@@ -602,6 +602,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev,
	case NI_660X_PFI_OUTPUT_DIO:
		if (chan > 31)
			return -EINVAL;
+		break;
	default:
		return -EINVAL;
	}
@@ -57,15 +57,30 @@ enum z_erofs_vle_work_role {
	Z_EROFS_VLE_WORK_SECONDARY,
	Z_EROFS_VLE_WORK_PRIMARY,
	/*
-	 * The current work has at least been linked with the following
-	 * processed chained works, which means if the processing page
-	 * is the tail partial page of the work, the current work can
-	 * safely use the whole page, as illustrated below:
-	 * +--------------+-------------------------------------------+
-	 * |  tail page   |      head page (of the previous work)     |
-	 * +--------------+-------------------------------------------+
-	 *   /\  which belongs to the current work
-	 * [  (*) this page can be used for the current work itself.  ]
+	 * The current work was the tail of an exist chain, and the previous
+	 * processed chained works are all decided to be hooked up to it.
+	 * A new chain should be created for the remaining unprocessed works,
+	 * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+	 * the next work cannot reuse the whole page in the following scenario:
+	 *  ________________________________________________________________
+	 * |      tail (partial) page     |       head (partial) page       |
+	 * |  (belongs to the next work)  |  (belongs to the current work)  |
+	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
+	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
+	/*
+	 * The current work has been linked with the processed chained works,
+	 * and could be also linked with the potential remaining works, which
+	 * means if the processing page is the tail partial page of the work,
+	 * the current work can safely use the whole page (since the next work
+	 * is under control) for in-place decompression, as illustrated below:
+	 *  ________________________________________________________________
+	 * |  tail (partial) page  |          head (partial) page           |
+	 * | (of the current work) |         (of the previous work)         |
+	 * |  PRIMARY_FOLLOWED or  |                                        |
+	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
+	 *
+	 * [  (*) the above page can be used for the current work itself.  ]
+	 */
	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
	Z_EROFS_VLE_WORK_MAX
@@ -234,10 +249,10 @@ static int z_erofs_vle_work_add_page(
	return ret ? 0 : -EAGAIN;
}

-static inline bool try_to_claim_workgroup(
-	struct z_erofs_vle_workgroup *grp,
-	z_erofs_vle_owned_workgrp_t *owned_head,
-	bool *hosted)
+static enum z_erofs_vle_work_role
+try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
+		       z_erofs_vle_owned_workgrp_t *owned_head,
+		       bool *hosted)
{
	DBG_BUGON(*hosted == true);

@@ -251,6 +266,9 @@ retry:

		*owned_head = grp;
		*hosted = true;
+		/* lucky, I am the followee :) */
+		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;

	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
		/*
		 * type 2, link to the end of a existing open chain,
@@ -260,12 +278,11 @@ retry:
		if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
			Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
			goto retry;

		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
-	} else
-		return false;	/* :( better luck next time */
+		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
	}

-	return true;	/* lucky, I am the followee :) */
+	return Z_EROFS_VLE_WORK_PRIMARY;	/* :( better luck next time */
}

static struct z_erofs_vle_work *
@@ -337,12 +354,8 @@ z_erofs_vle_work_lookup(struct super_block *sb,
	*hosted = false;
	if (!primary)
		*role = Z_EROFS_VLE_WORK_SECONDARY;
-	/* claim the workgroup if possible */
-	else if (try_to_claim_workgroup(grp, owned_head, hosted))
-		*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
-	else
-		*role = Z_EROFS_VLE_WORK_PRIMARY;
-
+	else	/* claim the workgroup if possible */
+		*role = try_to_claim_workgroup(grp, owned_head, hosted);
	return work;
}

@@ -419,6 +432,9 @@ static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
	}
}

+#define builder_is_hooked(builder) \
+	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
+
#define builder_is_followed(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)

@@ -583,7 +599,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
	struct z_erofs_vle_work_builder *const builder = &fe->builder;
	const loff_t offset = page_offset(page);

-	bool tight = builder_is_followed(builder);
+	bool tight = builder_is_hooked(builder);
	struct z_erofs_vle_work *work = builder->work;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
@@ -606,8 +622,12 @@ repeat:

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
-	    offset + cur < map->m_la + map->m_llen)
+	    offset + cur < map->m_la + map->m_llen) {
+		/* didn't get a valid unzip work previously (very rare) */
+		if (!builder->work)
+			goto restart_now;
		goto hitted;
+	}

	/* go ahead the next map_blocks */
	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
@@ -621,6 +641,7 @@ repeat:
	if (unlikely(err))
		goto err_out;

+restart_now:
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
		goto hitted;

@@ -646,7 +667,7 @@ repeat:
		builder->role = Z_EROFS_VLE_WORK_PRIMARY;
#endif

-	tight &= builder_is_followed(builder);
+	tight &= builder_is_hooked(builder);
	work = builder->work;
hitted:
	cur = end - min_t(unsigned, offset + end - map->m_la, end);
@@ -661,6 +682,9 @@ hitted:
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

+	if (cur)
+		tight &= builder_is_followed(builder);
+
retry:
	err = z_erofs_vle_work_add_page(builder, page, page_type);
	/* should allocate an additional staging page for pagevec */
@@ -901,11 +925,10 @@ repeat:
	if (llen > grp->llen)
		llen = grp->llen;

-	err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
-		clusterpages, pages, llen, work->pageofs,
-		z_erofs_onlinepage_endio);
+	err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+					    pages, llen, work->pageofs);
	if (err != -ENOTSUPP)
-		goto out_percpu;
+		goto out;

	if (sparsemem_pages >= nr_pages)
		goto skip_allocpage;
@@ -926,21 +949,7 @@ skip_allocpage:
	erofs_vunmap(vout, nr_pages);

out:
-	for (i = 0; i < nr_pages; ++i) {
-		page = pages[i];
-		DBG_BUGON(page->mapping == NULL);
-
-		/* recycle all individual staging pages */
-		if (z_erofs_gather_if_stagingpage(page_pool, page))
-			continue;
-
-		if (unlikely(err < 0))
-			SetPageError(page);
-
-		z_erofs_onlinepage_endio(page);
-	}
-
out_percpu:
	/* must handle all compressed pages before endding pages */
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];
@@ -954,6 +963,23 @@ out_percpu:
		WRITE_ONCE(compressed_pages[i], NULL);
	}

+	for (i = 0; i < nr_pages; ++i) {
+		page = pages[i];
+		if (!page)
+			continue;
+
+		DBG_BUGON(page->mapping == NULL);
+
+		/* recycle all individual staging pages */
+		if (z_erofs_gather_if_stagingpage(page_pool, page))
+			continue;
+
+		if (unlikely(err < 0))
+			SetPageError(page);
+
+		z_erofs_onlinepage_endio(page);
+	}
+
	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (unlikely(pages != pages_onstack))
@@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct page **compressed_pages,

extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
	unsigned clusterpages, struct page **pages,
-	unsigned outlen, unsigned short pageofs,
-	void (*endio)(struct page *));
+	unsigned int outlen, unsigned short pageofs);

extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
	unsigned clusterpages, void *vaddr, unsigned llen,
@@ -105,8 +105,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
				  unsigned clusterpages,
				  struct page **pages,
				  unsigned outlen,
-				  unsigned short pageofs,
-				  void (*endio)(struct page *))
+				  unsigned short pageofs)
{
	void *vin, *vout;
	unsigned nr_pages, i, j;
@@ -128,31 +127,30 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, outlen);

-	if (ret >= 0) {
-		outlen = ret;
-		ret = 0;
-	}
+	if (ret < 0)
+		goto out;
+	ret = 0;

	for (i = 0; i < nr_pages; ++i) {
		j = min((unsigned)PAGE_SIZE - pageofs, outlen);

		if (pages[i] != NULL) {
-			if (ret < 0)
-				SetPageError(pages[i]);
-			else if (clusterpages == 1 && pages[i] == compressed_pages[0])
+			if (clusterpages == 1 &&
+			    pages[i] == compressed_pages[0]) {
				memcpy(vin + pageofs, vout + pageofs, j);
-			else {
+			} else {
				void *dst = kmap_atomic(pages[i]);

				memcpy(dst + pageofs, vout + pageofs, j);
				kunmap_atomic(dst);
			}
-			endio(pages[i]);
		}
		vout += PAGE_SIZE;
		outlen -= j;
		pageofs = 0;
	}

+out:
	preempt_enable();

	if (clusterpages == 1)
@@ -1090,8 +1090,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
		vif->wilc = *wilc;
		vif->ndev = ndev;
		wl->vif[i] = vif;
-		wl->vif_num = i;
-		vif->idx = wl->vif_num;
+		wl->vif_num = i + 1;
+		vif->idx = i;

		ndev->netdev_ops = &wilc_netdev_ops;
@@ -187,6 +187,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
		xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
	{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
	{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
	{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
	{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -1353,8 +1354,13 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
	if (priv->partnum == CP210X_PARTNUM_CP2105)
		req_type = REQTYPE_INTERFACE_TO_HOST;

+	result = usb_autopm_get_interface(serial->interface);
+	if (result)
+		return result;
+
	result = cp210x_read_vendor_block(serial, req_type,
					  CP210X_READ_LATCH, &buf, sizeof(buf));
+	usb_autopm_put_interface(serial->interface);
	if (result < 0)
		return result;

@@ -1375,6 +1381,10 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)

	buf.mask = BIT(gpio);

+	result = usb_autopm_get_interface(serial->interface);
+	if (result)
+		goto out;
+
	if (priv->partnum == CP210X_PARTNUM_CP2105) {
		result = cp210x_write_vendor_block(serial,
						   REQTYPE_HOST_TO_INTERFACE,
@@ -1392,6 +1402,8 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
						 NULL, 0, USB_CTRL_SET_TIMEOUT);
	}

+	usb_autopm_put_interface(serial->interface);
+out:
	if (result < 0) {
		dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
			result);
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
	{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+	/* EZPrototypes devices */
+	{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
	{ }					/* Terminating entry */
};
@@ -1308,6 +1308,12 @@
#define IONICS_VID			0x1c0c
#define IONICS_PLUGCOMPUTER_PID		0x0102

+/*
+ * EZPrototypes (PID reseller)
+ */
+#define EZPROTOTYPES_VID		0x1c40
+#define HJELMSLUND_USB485_ISO_PID	0x0477
+
/*
 * Dresden Elektronik Sensor Terminal Board
 */
@@ -1148,6 +1148,8 @@ static const struct usb_device_id option_ids[] = {
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
	  .driver_info = NCTRL(0) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),	/* Telit ME910 (ECM) */
+	  .driver_info = NCTRL(0) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
fs/aio.c

@@ -1661,6 +1661,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	__poll_t mask = key_to_poll(key);
+	unsigned long flags;

	req->woken = true;

@@ -1669,10 +1670,15 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	if (!(mask & req->events))
		return 0;

-	/* try to complete the iocb inline if we can: */
-	if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+	/*
+	 * Try to complete the iocb inline if we can. Use
+	 * irqsave/irqrestore because not all filesystems (e.g. fuse)
+	 * call this function with IRQs disabled and because IRQs
+	 * have to be disabled before ctx_lock is obtained.
+	 */
+	if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
		list_del(&iocb->ki_list);
-		spin_unlock(&iocb->ki_ctx->ctx_lock);
+		spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);

		list_del_init(&req->wait.entry);
		aio_poll_complete(iocb, mask);
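[Editor's note: a minimal sketch of the trylock-irqsave pattern the aio fix above adopts. The lock name and the two completion helpers are hypothetical; spin_trylock_irqsave/spin_unlock_irqrestore are real kernel APIs.]

	/*
	 * A wake callback can run with IRQs enabled or disabled depending on
	 * the caller. A plain spin_trylock() on a lock that is also taken
	 * from IRQ context can then deadlock; the _irqsave variant disables
	 * IRQs before acquiring and restores the caller's state afterwards.
	 */
	unsigned long flags;

	if (spin_trylock_irqsave(&ctx_lock, flags)) {
		complete_inline();			/* hypothetical fast path */
		spin_unlock_irqrestore(&ctx_lock, flags);
	} else {
		defer_completion();			/* hypothetical slow path */
	}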
@@ -929,7 +929,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
		bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
		if (bytes < 0) {
			ret = bytes;
-			goto out;
+			goto out_free;
		}

		if (bytes == 0)
@@ -829,7 +829,7 @@ int ovl_copy_up_flags(struct dentry *dentry, int flags)
		dput(parent);
		dput(next);
	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	return err;
}
@@ -567,7 +567,8 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
		override_cred->fsgid = inode->i_gid;
		if (!attr->hardlink) {
			err = security_dentry_create_files_as(dentry,
-					attr->mode, &dentry->d_name, old_cred,
+					attr->mode, &dentry->d_name,
+					old_cred ? old_cred : current_cred(),
					override_cred);
			if (err) {
				put_cred(override_cred);
@@ -583,7 +584,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
		err = ovl_create_over_whiteout(dentry, inode, attr);
	}
out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	return err;
}

@@ -659,7 +660,7 @@ static int ovl_set_link_redirect(struct dentry *dentry)

	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_set_redirect(dentry, false);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	return err;
}
@@ -857,7 +858,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
		err = ovl_remove_upper(dentry, is_dir, &list);
	else
		err = ovl_remove_and_whiteout(dentry, &list);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	if (!err) {
		if (is_dir)
			clear_nlink(dentry->d_inode);
@@ -1225,7 +1226,7 @@ out_dput_old:
out_unlock:
	unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	ovl_nlink_end(new, locked);
out_drop_write:
	ovl_drop_write(old);
@@ -33,7 +33,7 @@ static struct file *ovl_open_realfile(const struct file *file,
	old_cred = ovl_override_creds(inode->i_sb);
	realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME,
				       realinode, current_cred());
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
		 file, file, ovl_whatisit(inode, realinode), file->f_flags,
@@ -208,7 +208,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
			    ovl_iocb_to_rwf(iocb));
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	ovl_file_accessed(file);

@@ -244,7 +244,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
	ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
			     ovl_iocb_to_rwf(iocb));
	file_end_write(real.file);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	/* Update size */
	ovl_copyattr(ovl_inode_real(inode), inode);
@@ -271,7 +271,7 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
	if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
		old_cred = ovl_override_creds(file_inode(file)->i_sb);
		ret = vfs_fsync_range(real.file, start, end, datasync);
-		revert_creds(old_cred);
+		ovl_revert_creds(old_cred);
	}

	fdput(real);
@@ -295,7 +295,7 @@ static int ovl_mmap(struct file *file, struct vm_area_struct *vma)

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = call_mmap(vma->vm_file, vma);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	if (ret) {
		/* Drop reference count from new vm_file value */
@@ -323,7 +323,7 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = vfs_fallocate(real.file, mode, offset, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	/* Update size */
	ovl_copyattr(ovl_inode_real(inode), inode);
@@ -345,7 +345,7 @@ static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = vfs_fadvise(real.file, offset, len, advice);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	fdput(real);

@@ -365,7 +365,7 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd,

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = vfs_ioctl(real.file, cmd, arg);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	fdput(real);

@@ -470,7 +470,7 @@ static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in,
						real_out.file, pos_out, len);
		break;
	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	/* Update size */
	ovl_copyattr(ovl_inode_real(inode_out), inode_out);
@@ -64,7 +64,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
		inode_lock(upperdentry->d_inode);
		old_cred = ovl_override_creds(dentry->d_sb);
		err = notify_change(upperdentry, attr, NULL);
-		revert_creds(old_cred);
+		ovl_revert_creds(old_cred);
		if (!err)
			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
		inode_unlock(upperdentry->d_inode);
@@ -260,7 +260,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
		stat->nlink = dentry->d_inode->i_nlink;

out:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	return err;
}
@@ -294,7 +294,7 @@ int ovl_permission(struct inode *inode, int mask)
		mask |= MAY_READ;
	}
	err = inode_permission(realinode, mask);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	return err;
}
@@ -311,7 +311,7 @@ static const char *ovl_get_link(struct dentry *dentry,

	old_cred = ovl_override_creds(dentry->d_sb);
	p = vfs_get_link(ovl_dentry_real(dentry), done);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	return p;
}

@@ -354,7 +354,7 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
		WARN_ON(flags != XATTR_REPLACE);
		err = vfs_removexattr(realdentry, name);
	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	/* copy c/mtime */
	ovl_copyattr(d_inode(realdentry), inode);
@@ -375,7 +375,7 @@ int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,

	old_cred = ovl_override_creds(dentry->d_sb);
	res = vfs_getxattr(realdentry, name, value, size);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	return res;
}

@@ -399,7 +399,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)

	old_cred = ovl_override_creds(dentry->d_sb);
	res = vfs_listxattr(realdentry, list, size);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	if (res <= 0 || size == 0)
		return res;

@@ -434,7 +434,7 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)

	old_cred = ovl_override_creds(inode->i_sb);
	acl = get_acl(realinode, type);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	return acl;
}
@@ -472,7 +472,7 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		filemap_write_and_wait(realinode->i_mapping);

	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	return err;
}
@@ -1069,7 +1069,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
			goto out_free_oe;
	}

-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	if (origin_path) {
		dput(origin_path->dentry);
		kfree(origin_path);
@@ -1096,7 +1096,7 @@ out_put_upper:
	kfree(upperredirect);
out:
	kfree(d.redirect);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
	return ERR_PTR(err);
}

@@ -1150,7 +1150,7 @@ bool ovl_lower_positive(struct dentry *dentry)
			dput(this);
		}
	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

	return positive;
}
@@ -208,6 +208,7 @@ int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
+void ovl_revert_creds(const struct cred *oldcred);
struct super_block *ovl_same_sb(struct super_block *sb);
int ovl_can_decode_fh(struct super_block *sb);
struct dentry *ovl_indexdir(struct super_block *sb);
@ -20,6 +20,7 @@ struct ovl_config {
|
||||
bool nfs_export;
|
||||
int xino;
|
||||
bool metacopy;
|
||||
bool override_creds;
|
||||
};
|
||||
|
||||
struct ovl_sb {
|
||||
|
@@ -289,7 +289,7 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
 		}
 		inode_unlock(dir->d_inode);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

 	return err;
 }
@@ -921,7 +921,7 @@ int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)

 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = ovl_dir_read_merged(dentry, list, &root);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (err)
 		return err;
@@ -56,6 +56,11 @@ module_param_named(xino_auto, ovl_xino_auto_def, bool, 0644);
 MODULE_PARM_DESC(ovl_xino_auto_def,
 		 "Auto enable xino feature");

+static bool __read_mostly ovl_override_creds_def = true;
+module_param_named(override_creds, ovl_override_creds_def, bool, 0644);
+MODULE_PARM_DESC(ovl_override_creds_def,
+		 "Use mounter's credentials for accesses");
+
 static void ovl_entry_stack_free(struct ovl_entry *oe)
 {
 	unsigned int i;
@@ -362,6 +367,9 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
 	if (ofs->config.metacopy != ovl_metacopy_def)
 		seq_printf(m, ",metacopy=%s",
 			   ofs->config.metacopy ? "on" : "off");
+	if (ofs->config.override_creds != ovl_override_creds_def)
+		seq_show_option(m, "override_creds",
+				ofs->config.override_creds ? "on" : "off");
 	return 0;
 }

@@ -401,6 +409,8 @@ enum {
 	OPT_XINO_AUTO,
 	OPT_METACOPY_ON,
 	OPT_METACOPY_OFF,
+	OPT_OVERRIDE_CREDS_ON,
+	OPT_OVERRIDE_CREDS_OFF,
 	OPT_ERR,
 };

@@ -419,6 +429,8 @@ static const match_table_t ovl_tokens = {
 	{OPT_XINO_AUTO, "xino=auto"},
 	{OPT_METACOPY_ON, "metacopy=on"},
 	{OPT_METACOPY_OFF, "metacopy=off"},
+	{OPT_OVERRIDE_CREDS_ON, "override_creds=on"},
+	{OPT_OVERRIDE_CREDS_OFF, "override_creds=off"},
 	{OPT_ERR, NULL}
 };

@@ -477,6 +489,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 	config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
 	if (!config->redirect_mode)
 		return -ENOMEM;
+	config->override_creds = ovl_override_creds_def;

 	while ((p = ovl_next_opt(&opt)) != NULL) {
 		int token;
@@ -557,6 +570,14 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 			config->metacopy = false;
 			break;

+		case OPT_OVERRIDE_CREDS_ON:
+			config->override_creds = true;
+			break;
+
+		case OPT_OVERRIDE_CREDS_OFF:
+			config->override_creds = false;
+			break;
+
 		default:
 			pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
 			return -EINVAL;
@@ -1521,7 +1542,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 		       ovl_dentry_lower(root_dentry), NULL);

 	sb->s_root = root_dentry;

 	return 0;

 out_free_oe:
@@ -40,9 +40,17 @@ const struct cred *ovl_override_creds(struct super_block *sb)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;

+	if (!ofs->config.override_creds)
+		return NULL;
 	return override_creds(ofs->creator_cred);
 }

+void ovl_revert_creds(const struct cred *old_cred)
+{
+	if (old_cred)
+		revert_creds(old_cred);
+}
+
 struct super_block *ovl_same_sb(struct super_block *sb)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
@@ -783,7 +791,7 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked)
 	 * value relative to the upper inode nlink in an upper inode xattr.
 	 */
 	err = ovl_set_nlink_upper(dentry);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);

 out:
 	if (err)
@@ -803,7 +811,7 @@ void ovl_nlink_end(struct dentry *dentry, bool locked)

 		old_cred = ovl_override_creds(dentry->d_sb);
 		ovl_cleanup_index(dentry);
-		revert_creds(old_cred);
+		ovl_revert_creds(old_cred);
 	}

 	mutex_unlock(&OVL_I(d_inode(dentry))->lock);
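
Note: with the helpers above, ovl_override_creds() returns NULL when the filesystem was mounted with override_creds=off, and ovl_revert_creds() quietly ignores a NULL saved cred, so every call site converted in this patch keeps the same unconditional pairing. A minimal sketch of that pattern, using only the two helpers this diff introduces (the function and its body are illustrative, not from the patch):

    static int ovl_example_op(struct dentry *dentry)
    {
            const struct cred *old_cred;
            int err = 0;

            /* NULL when mounted with override_creds=off */
            old_cred = ovl_override_creds(dentry->d_sb);
            /* ... perform the underlying-fs access here ... */
            ovl_revert_creds(old_cred);     /* no-op for a NULL cred */
            return err;
    }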
@@ -100,7 +100,6 @@ config PROC_CHILDREN

 config PROC_UID
 	bool "Include /proc/uid/ files"
-	default y
 	depends on PROC_FS && RT_MUTEXES
 	help
 	  Provides aggregated per-uid information under /proc/uid.
@@ -254,20 +254,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 static struct freq_attr _name = \
 __ATTR(_name, 0200, NULL, store_##_name)

-struct global_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *kobj,
-			struct attribute *attr, char *buf);
-	ssize_t (*store)(struct kobject *a, struct attribute *b,
-			 const char *c, size_t count);
-};
-
 #define define_one_global_ro(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
 __ATTR(_name, 0444, show_##_name, NULL)

 #define define_one_global_rw(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
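
Note: struct kobj_attribute is the generic sysfs attribute type from <linux/kobject.h>; unlike the removed struct global_attr, its show/store callbacks receive the kobj_attribute itself rather than a bare struct attribute, which is what makes the upstream cpufreq fix type-safe. A hedged sketch of a read-only attribute built the same way (the names here are illustrative):

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    /* show() callback with the kobj_attribute signature */
    static ssize_t example_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 42);    /* report a fixed value */
    }

    static struct kobj_attribute example_attr =
            __ATTR(example, 0444, example_show, NULL);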
@@ -27,7 +27,8 @@ int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
 			    struct pid *pid, struct task_struct *p);
 void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
 void cpufreq_times_create_policy(struct cpufreq_policy *policy);
-void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+				     unsigned int new_freq);
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
 int single_uid_time_in_state_open(struct inode *inode, struct file *file);
 #else
@@ -38,7 +39,7 @@ static inline void cpufreq_acct_update_power(struct task_struct *p,
 					     u64 cputime) {}
 static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
 static inline void cpufreq_times_record_transition(
-	struct cpufreq_freqs *freq) {}
+	struct cpufreq_policy *policy, unsigned int new_freq) {}
 static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
 						  uid_t uid_end) {}
 #endif /* CONFIG_CPU_FREQ_TIMES */
@@ -276,7 +276,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
 int bt_sock_wait_ready(struct sock *sk, unsigned long flags);

-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
 void bt_accept_unlink(struct sock *sk);
 struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
@@ -22,6 +22,7 @@

 #include <net/inet_sock.h>
 #include <net/snmp.h>
+#include <net/ip.h>

 struct icmp_err {
 	int errno;
@@ -39,7 +40,13 @@ struct net_proto_family;
 struct sk_buff;
 struct net;

-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+		 const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+	__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}

 int icmp_rcv(struct sk_buff *skb);
 void icmp_err(struct sk_buff *skb, u32 info);
 int icmp_init(void);
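
Note: icmp_send() is now an inline wrapper that forwards the options found in the skb's control block, while callers that sit above the IP layer (where IPCB is not valid) can hand their own compiled options to __icmp_send(). A sketch of such a caller, mirroring the cipso_v4_error() hunk further down in this diff (buffer sizing and calls are taken from that hunk; the surrounding function is assumed):

    unsigned char optbuf[sizeof(struct ip_options) + 40];
    struct ip_options *opt = (struct ip_options *)optbuf;

    memset(opt, 0, sizeof(struct ip_options));
    opt->optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
    if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL) == 0)
            __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);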
@@ -641,6 +641,8 @@ static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
 }

 void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+			 struct sk_buff *skb, __be32 *info);
 int ip_options_compile(struct net *net, struct ip_options *opt,
 		       struct sk_buff *skb);
 int ip_options_get(struct net *net, struct ip_options_rcu **optp,
@@ -690,7 +692,7 @@ extern int sysctl_icmp_msgs_burst;
 int ip_misc_proc_init(void);
 #endif

-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack);

 #endif /* _IP_H */
@@ -47,7 +47,10 @@ struct qdisc_size_table {
 struct qdisc_skb_head {
 	struct sk_buff *head;
 	struct sk_buff *tail;
-	__u32 qlen;
+	union {
+		u32 qlen;
+		atomic_t atomic_qlen;
+	};
 	spinlock_t lock;
 };

@@ -384,27 +387,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }

-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
-	return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
 	return q->q.qlen;
 }

-static inline int qdisc_qlen_sum(const struct Qdisc *q)
+static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
 {
-	__u32 qlen = q->qstats.qlen;
-	int i;
+	u32 qlen = q->qstats.qlen;

-	if (q->flags & TCQ_F_NOLOCK) {
-		for_each_possible_cpu(i)
-			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
-	} else {
+	if (q->flags & TCQ_F_NOLOCK)
+		qlen += atomic_read(&q->q.atomic_qlen);
+	else
 		qlen += q->q.qlen;
-	}

 	return qlen;
 }
@@ -776,14 +771,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }

-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
 {
-	this_cpu_inc(sch->cpu_qstats->qlen);
+	atomic_inc(&sch->q.atomic_qlen);
 }

-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
 {
-	this_cpu_dec(sch->cpu_qstats->qlen);
+	atomic_dec(&sch->q.atomic_qlen);
 }

 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
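
Note: the union above lets lockless (TCQ_F_NOLOCK) qdiscs keep their queue length in a single atomic_t instead of per-cpu counters, so a reader gets one coherent value from atomic_read() rather than a per-cpu sum that can be torn while other CPUs keep enqueueing. A minimal sketch of the resulting read path (an illustrative helper, grounded in the hunks above):

    static u32 example_read_qlen(const struct Qdisc *q)
    {
            if (q->flags & TCQ_F_NOLOCK)
                    return atomic_read(&q->q.atomic_qlen);
            return q->q.qlen;       /* protected by the qdisc lock */
    }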
@@ -6035,7 +6035,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			u32 off_reg;

 			aux = &env->insn_aux_data[i + delta];
-			if (!aux->alu_state)
+			if (!aux->alu_state ||
+			    aux->alu_state == BPF_ALU_NON_POINTER)
 				continue;

 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
@@ -1301,7 +1301,7 @@ static int parse_pred(const char *str, void *data,
 		/* go past the last quote */
 		i++;

-	} else if (isdigit(str[i])) {
+	} else if (isdigit(str[i]) || str[i] == '-') {

 		/* Make sure the field is not a string */
 		if (is_string_field(field)) {
@@ -1314,6 +1314,9 @@ static int parse_pred(const char *str, void *data,
 			goto err_free;
 		}

+		if (str[i] == '-')
+			i++;
+
 		/* We allow 0xDEADBEEF */
 		while (isalnum(str[i]))
 			i++;
@@ -183,15 +183,25 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
 }
 EXPORT_SYMBOL(bt_sock_unlink);

-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
 {
 	BT_DBG("parent %p, sk %p", parent, sk);

 	sock_hold(sk);
-	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+	if (bh)
+		bh_lock_sock_nested(sk);
+	else
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
 	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
 	bt_sk(sk)->parent = parent;
-	release_sock(sk);
+
+	if (bh)
+		bh_unlock_sock(sk);
+	else
+		release_sock(sk);
+
 	parent->sk_ack_backlog++;
 }
 EXPORT_SYMBOL(bt_accept_enqueue);
@@ -1252,7 +1252,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)

 	l2cap_sock_init(sk, parent);

-	bt_accept_enqueue(parent, sk);
+	bt_accept_enqueue(parent, sk, false);

 	release_sock(parent);

@@ -988,7 +988,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
 	rfcomm_pi(sk)->channel = channel;

 	sk->sk_state = BT_CONFIG;
-	bt_accept_enqueue(parent, sk);
+	bt_accept_enqueue(parent, sk, true);

 	/* Accept connection and return socket DLC */
 	*d = rfcomm_pi(sk)->dlc;

@@ -193,7 +193,7 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
 	conn->sk = sk;

 	if (parent)
-		bt_accept_enqueue(parent, sk);
+		bt_accept_enqueue(parent, sk, true);
 }

 static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
@@ -256,7 +256,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
 	for_each_possible_cpu(i) {
 		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

-		qstats->qlen = 0;
 		qstats->backlog += qcpu->backlog;
 		qstats->drops += qcpu->drops;
 		qstats->requeues += qcpu->requeues;
@@ -272,7 +271,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
 	if (cpu) {
 		__gnet_stats_copy_queue_cpu(qstats, cpu);
 	} else {
-		qstats->qlen = q->qlen;
 		qstats->backlog = q->backlog;
 		qstats->drops = q->drops;
 		qstats->requeues = q->requeues;
@@ -1547,6 +1547,9 @@ static int register_queue_kobjects(struct net_device *dev)
 error:
 	netdev_queue_update_kobjects(dev, txq, 0);
 	net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+	kset_unregister(dev->queues_kset);
+#endif
 	return error;
 }
@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
 	case CIPSO_V4_MAP_PASS:
 		return 0;
 	case CIPSO_V4_MAP_TRANS:
-		if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+		if ((level < doi_def->map.std->lvl.cipso_size) &&
+		    (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
 			return 0;
 		break;
 	}
@@ -1735,13 +1736,26 @@ validate_return:
  */
 void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
 {
+	unsigned char optbuf[sizeof(struct ip_options) + 40];
+	struct ip_options *opt = (struct ip_options *)optbuf;
+
 	if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
 		return;

+	/*
+	 * We might be called above the IP layer,
+	 * so we can not use icmp_send and IPCB here.
+	 */
+
+	memset(opt, 0, sizeof(struct ip_options));
+	opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+	if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+		return;
+
 	if (gateway)
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
 	else
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
 }

 /**
@@ -700,6 +700,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
 		case RTA_GATEWAY:
 			cfg->fc_gw = nla_get_be32(attr);
 			break;
+		case RTA_VIA:
+			NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
+			err = -EINVAL;
+			goto errout;
 		case RTA_PRIORITY:
 			cfg->fc_priority = nla_get_u32(attr);
 			break;
@@ -570,7 +570,8 @@ relookup_failed:
  *			MUST reply to only the first fragment.
  */

-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+		 const struct ip_options *opt)
 {
 	struct iphdr *iph;
 	int room;
@@ -691,7 +692,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 					  iph->tos;
 	mark = IP4_REPLY_MARK(net, skb_in->mark);

-	if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
+	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
 		goto out_unlock;

@@ -742,7 +743,7 @@ out_bh_enable:
 	local_bh_enable();
 out:;
 }
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);

 static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
@@ -307,11 +307,10 @@ drop:
 }

 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
-			      struct sk_buff *skb)
+			      struct sk_buff *skb, struct net_device *dev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	int (*edemux)(struct sk_buff *skb);
-	struct net_device *dev = skb->dev;
 	struct rtable *rt;
 	int err;

@@ -400,6 +399,7 @@ drop_error:

 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	struct net_device *dev = skb->dev;
 	int ret;

 	/* if ingress device is enslaved to an L3 master device pass the
@@ -409,7 +409,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 	if (!skb)
 		return NET_RX_SUCCESS;

-	ret = ip_rcv_finish_core(net, sk, skb);
+	ret = ip_rcv_finish_core(net, sk, skb, dev);
 	if (ret != NET_RX_DROP)
 		ret = dst_input(skb);
 	return ret;
@@ -549,6 +549,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,

 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
+		struct net_device *dev = skb->dev;
 		struct dst_entry *dst;

 		skb_list_del_init(skb);
@@ -558,7 +559,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 		skb = l3mdev_ip_rcv(skb);
 		if (!skb)
 			continue;
-		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
 			continue;

 		dst = skb_dst(skb);
@@ -251,8 +251,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
  * If opt == NULL, then skb->data should point to IP header.
  */

-int ip_options_compile(struct net *net,
-		       struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+			 struct ip_options *opt, struct sk_buff *skb,
+			 __be32 *info)
 {
 	__be32 spec_dst = htonl(INADDR_ANY);
 	unsigned char *pp_ptr = NULL;
@@ -468,11 +469,22 @@ eol:
 		return 0;

 error:
-	if (skb) {
-		icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
-	}
+	if (info)
+		*info = htonl((pp_ptr-iph)<<24);
 	return -EINVAL;
 }

+int ip_options_compile(struct net *net,
+		       struct ip_options *opt, struct sk_buff *skb)
+{
+	int ret;
+	__be32 info;
+
+	ret = __ip_options_compile(net, opt, skb, &info);
+	if (ret != 0 && skb)
+		icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+	return ret;
+}
 EXPORT_SYMBOL(ip_options_compile);

 /*
@@ -3,9 +3,10 @@
 #include <linux/types.h>
 #include <net/net_namespace.h>
 #include <net/netlink.h>
+#include <linux/in6.h>
 #include <net/ip.h>

-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack)
 {
 	*ip_proto = nla_get_u8(attr);
@@ -13,11 +14,19 @@ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
 	switch (*ip_proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
-	case IPPROTO_ICMP:
 		return 0;
-	default:
-		NL_SET_ERR_MSG(extack, "Unsupported ip proto");
-		return -EOPNOTSUPP;
+	case IPPROTO_ICMP:
+		if (family != AF_INET)
+			break;
+		return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		if (family != AF_INET6)
+			break;
+		return 0;
+#endif
 	}
+	NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);

@@ -2814,7 +2814,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,

 	if (tb[RTA_IP_PROTO]) {
 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-						  &ip_proto, extack);
+						  &ip_proto, AF_INET, extack);
 		if (err)
 			return err;
 	}
@@ -1954,10 +1954,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)

 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-			IPSTATS_MIB_OUTFORWDATAGRAMS);
-	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
-			IPSTATS_MIB_OUTOCTETS, skb->len);
+	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+		      IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+		      IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(net, sk, skb);
 }
@@ -4189,6 +4189,10 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
 		cfg->fc_flags |= RTF_GATEWAY;
 	}
+	if (tb[RTA_VIA]) {
+		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
+		goto errout;
+	}

 	if (tb[RTA_DST]) {
 		int plen = (rtm->rtm_dst_len + 7) >> 3;
@@ -4883,7 +4887,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,

 	if (tb[RTA_IP_PROTO]) {
 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-						  &fl6.flowi6_proto, extack);
+						  &fl6.flowi6_proto, AF_INET6,
+						  extack);
 		if (err)
 			goto errout;
 	}
@@ -1873,6 +1873,7 @@ static int __net_init sit_init_net(struct net *net)

 err_reg_dev:
 	ipip6_dev_free(sitn->fb_tunnel_dev);
+	free_netdev(sitn->fb_tunnel_dev);
 err_alloc_dev:
 	return err;
 }
@@ -1822,6 +1822,9 @@ static int rtm_to_route_config(struct sk_buff *skb,
 				goto errout;
 			break;
 		}
+		case RTA_GATEWAY:
+			NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
+			goto errout;
 		case RTA_VIA:
 		{
 			if (nla_get_via(nla, &cfg->rc_via_alen,
@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
 		    (state == 0 && (byte & bitmask) == 0))
 			return bit_spot;

-		bit_spot++;
+		if (++bit_spot >= bitmap_len)
+			return -1;
 		bitmask >>= 1;
 		if (bitmask == 0) {
 			byte = bitmap[++byte_offset];
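
Note: the netlabel fix bounds the walk in bits: bit_spot is checked against bitmap_len before the next bit is examined, so a short or malformed CIPSO bitmap can no longer push byte_offset past the end of the buffer. A standalone, runnable sketch of the bounded walk (an illustrative function, not the kernel one; it assumes bitmap_len > 0):

    /* Return the index of the first set bit, or -1 if none within the
     * bitmap_len-bit window. Bit 0 is the MSB of bitmap[0]. */
    static int example_bitmap_walk(const unsigned char *bitmap,
                                   unsigned int bitmap_len)
    {
            unsigned int byte_offset = 0, bit_spot = 0;
            unsigned char bitmask = 0x80, byte = bitmap[0];

            while (bit_spot < bitmap_len) {
                    if (byte & bitmask)
                            return bit_spot;
                    if (++bit_spot >= bitmap_len)
                            return -1;      /* the bound the fix adds */
                    bitmask >>= 1;
                    if (bitmask == 0) {
                            byte = bitmap[++byte_offset];
                            bitmask = 0x80;
                    }
            }
            return -1;
    }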
@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
 						      sock->service_name,
 						      sock->service_name_len,
 						      &service_name_tlv_length);
+		if (!service_name_tlv) {
+			err = -ENOMEM;
+			goto error_tlv;
+		}
 		size += service_name_tlv_length;
 	}

@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)

 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
+	if (!miux_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += miux_tlv_length;

 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	if (!rw_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += rw_tlv_length;

 	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)

 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
+	if (!miux_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += miux_tlv_length;

 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	if (!rw_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += rw_tlv_length;

 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)

 static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
 {
-	u8 *gb_cur, *version_tlv, version, version_length;
-	u8 *lto_tlv, lto_length;
-	u8 *wks_tlv, wks_length;
-	u8 *miux_tlv, miux_length;
+	u8 *gb_cur, version, version_length;
+	u8 lto_length, wks_length, miux_length;
+	u8 *version_tlv = NULL, *lto_tlv = NULL,
+	   *wks_tlv = NULL, *miux_tlv = NULL;
 	__be16 wks = cpu_to_be16(local->local_wks);
 	u8 gb_len = 0;
 	int ret = 0;
@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
 	version = LLCP_VERSION_11;
 	version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
 					 1, &version_length);
+	if (!version_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += version_length;

 	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
+	if (!lto_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += lto_length;

 	pr_debug("Local wks 0x%lx\n", local->local_wks);
 	wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+	if (!wks_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += wks_length;

 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
 				      &miux_length);
+	if (!miux_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += miux_length;

 	gb_len += ARRAY_SIZE(llcp_magic);
@@ -199,8 +199,7 @@ err3:
 err2:
 	kfree(tname);
 err1:
-	if (ret == ACT_P_CREATED)
-		tcf_idr_release(*a, bind);
+	tcf_idr_release(*a, bind);
 	return err;
 }

@@ -191,8 +191,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,

 	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
 	if (unlikely(!params_new)) {
-		if (ret == ACT_P_CREATED)
-			tcf_idr_release(*a, bind);
+		tcf_idr_release(*a, bind);
 		return -ENOMEM;
 	}

@@ -377,7 +377,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	return ret;

 release_tun_meta:
-	dst_release(&metadata->dst);
+	if (metadata)
+		dst_release(&metadata->dst);

 err_out:
 	if (exists)
@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 			skb = __skb_dequeue(&q->skb_bad_txq);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,

 	if (qdisc_is_percpu_stats(q)) {
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);
 	} else {
 		qdisc_qstats_backlog_inc(q, skb);
 		q->q.qlen++;
@@ -147,7 +147,7 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)

 		qdisc_qstats_cpu_requeues_inc(q);
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);

 		skb = next;
 	}
@@ -252,7 +252,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 			skb = __skb_dequeue(&q->gso_skb);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
@@ -633,7 +633,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 	if (unlikely(err))
 		return qdisc_drop_cpu(skb, qdisc, to_free);

-	qdisc_qstats_cpu_qlen_inc(qdisc);
+	qdisc_qstats_atomic_qlen_inc(qdisc);
 	/* Note: skb can not be used after skb_array_produce(),
 	 * so we better not use qdisc_qstats_cpu_backlog_inc()
 	 */
@@ -658,7 +658,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 	if (likely(skb)) {
 		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
 		qdisc_bstats_cpu_update(qdisc, skb);
-		qdisc_qstats_cpu_qlen_dec(qdisc);
+		qdisc_qstats_atomic_qlen_dec(qdisc);
 	}

 	return skb;
@@ -702,7 +702,6 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
 		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);

 		q->backlog = 0;
-		q->qlen = 0;
 	}
 }
@@ -440,6 +440,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	int nb = 0;
 	int count = 1;
 	int rc = NET_XMIT_SUCCESS;
+	int rc_drop = NET_XMIT_DROP;

 	/* Do not fool qdisc_drop_all() */
 	skb->prev = NULL;
@@ -479,6 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		q->duplicate = 0;
 		rootq->enqueue(skb2, rootq, to_free);
 		q->duplicate = dupsave;
+		rc_drop = NET_XMIT_SUCCESS;
 	}

 	/*
@@ -491,7 +493,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		if (skb_is_gso(skb)) {
 			segs = netem_segment(skb, sch, to_free);
 			if (!segs)
-				return NET_XMIT_DROP;
+				return rc_drop;
 		} else {
 			segs = skb;
 		}
@@ -514,8 +516,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 				1<<(prandom_u32() % 8);
 	}

-	if (unlikely(sch->q.qlen >= sch->limit))
-		return qdisc_drop_all(skb, sch, to_free);
+	if (unlikely(sch->q.qlen >= sch->limit)) {
+		qdisc_drop_all(skb, sch, to_free);
+		return rc_drop;
+	}

 	qdisc_qstats_backlog_inc(sch, skb);
@@ -1884,6 +1884,7 @@ static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,

 		pr_debug("%s: aborting association:%p\n", __func__, asoc);
 		sctp_primitive_ABORT(net, asoc, chunk);
+		iov_iter_revert(&msg->msg_iter, msg_len);

 		return 0;
 	}
@@ -587,6 +587,7 @@ static void __sock_release(struct socket *sock, struct inode *inode)
 		if (inode)
 			inode_lock(inode);
 		sock->ops->release(sock);
+		sock->sk = NULL;
 		if (inode)
 			inode_unlock(inode);
 		sock->ops = NULL;
@@ -377,11 +377,13 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)

 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
 ({ \
-	DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
 	struct sock *sk_; \
 	int rc_; \
 \
 	while ((rc_ = !(condition_))) { \
+		DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
+		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
+		smp_rmb(); \
 		sk_ = (sock_)->sk; \
 		rc_ = tipc_sk_sock_err((sock_), timeo_); \
 		if (rc_) \
@@ -1318,7 +1320,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)

 	if (unlikely(!dest)) {
 		dest = &tsk->peer;
-		if (!syn || dest->family != AF_TIPC)
+		if (!syn && dest->family != AF_TIPC)
 			return -EDESTADDRREQ;
 	}

@@ -1961,6 +1963,8 @@ static void tipc_sk_proto_rcv(struct sock *sk,
 		return;
 	case SOCK_WAKEUP:
 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+		/* coupled with smp_rmb() in tipc_wait_for_cond() */
+		smp_wmb();
 		tsk->cong_link_cnt--;
 		wakeup = true;
 		break;
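
Note: the two TIPC hunks above form a producer/consumer barrier pair: tipc_sk_proto_rcv() updates cong_link_cnt and then publishes with smp_wmb(), while tipc_wait_for_cond() issues smp_rmb() before re-evaluating the wait condition, so the sender can no longer test a stale count and hang. The same pattern expressed in standalone C11 atomics, which is how the release/acquire pairing is usually demonstrated (names are illustrative, not TIPC code):

    #include <stdatomic.h>

    static int shared_data;         /* payload, illustrative */
    static atomic_int data_ready;   /* publication flag */

    void writer(void)               /* plays the smp_wmb() side */
    {
            shared_data = 42;
            atomic_store_explicit(&data_ready, 1, memory_order_release);
    }

    int reader(void)                /* plays the smp_rmb() side */
    {
            while (!atomic_load_explicit(&data_ready, memory_order_acquire))
                    ;               /* spin until published */
            return shared_data;     /* guaranteed to observe 42 */
    }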
@@ -91,7 +91,7 @@ verify_reqs()
 	if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
 		if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
 			echo "usermode helper disabled so ignoring test"
-			exit $ksft_skip
+			exit 0
 		fi
 	fi
 }