android_kernel_xiaomi_sm7250/block/elevator.c
UtsavBalar1231 61bba85e05 Merge tag 'ASB-2022-03-05_4.19-stable' of https://github.com/aosp-mirror/kernel_common into android12-base
https://source.android.com/security/bulletin/2022-03-01
CVE-2020-29368
CVE-2021-39685
CVE-2021-39686
CVE-2021-39698
CVE-2021-3655

* tag 'ASB-2022-03-05_4.19-stable' of https://github.com/aosp-mirror/kernel_common:
  Linux 4.19.232
  tty: n_gsm: fix encoding of control signal octet bit DV
  xhci: Prevent futile URB re-submissions due to incorrect return value.
  xhci: re-initialize the HC during resume if HCE was set
  usb: dwc3: gadget: Let the interrupt handler disable bottom halves.
  usb: dwc3: pci: Fix Bay Trail phy GPIO mappings
  USB: serial: option: add Telit LE910R1 compositions
  USB: serial: option: add support for DW5829e
  tracefs: Set the group ownership in apply_options() not parse_options()
  USB: gadget: validate endpoint index for xilinx udc
  usb: gadget: rndis: add spinlock for rndis response list
  Revert "USB: serial: ch341: add new Product ID for CH341A"
  ata: pata_hpt37x: disable primary channel on HPT371
  iio: adc: men_z188_adc: Fix a resource leak in an error handling path
  tracing: Have traceon and traceoff trigger honor the instance
  fget: clarify and improve __fget_files() implementation
  memblock: use kfree() to release kmalloced memblock regions
  Revert "drm/nouveau/pmu/gm200-: avoid touching PMU outside of DEVINIT/PREOS/ACR"
  gpio: tegra186: Fix chip_data type confusion
  tty: n_gsm: fix proper link termination after failed open
  RDMA/ib_srp: Fix a deadlock
  configfs: fix a race in configfs_{,un}register_subsystem()
  net/mlx5e: Fix wrong return value on ioctl EEPROM query failure
  drm/edid: Always set RGB444
  openvswitch: Fix setting ipv6 fields causing hw csum failure
  gso: do not skip outer ip header in case of ipip and net_failover
  tipc: Fix end of loop tests for list_for_each_entry()
  net: __pskb_pull_tail() & pskb_carve_frag_list() drop_monitor friends
  ping: remove pr_err from ping_lookup
  USB: zaurus: support another broken Zaurus
  sr9700: sanity check for packet length
  parisc/unaligned: Fix ldw() and stw() unalignment handlers
  parisc/unaligned: Fix fldd and fstd unaligned handlers on 32-bit kernel
  vhost/vsock: don't check owner in vhost_vsock_stop() while releasing
  cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug
  Linux 4.19.231
  net: macb: Align the dma and coherent dma masks
  net: usb: qmi_wwan: Add support for Dell DW5829e
  tracing: Fix tp_printk option related with tp_printk_stop_on_boot
  ata: libata-core: Disable TRIM on M88V29
  kconfig: let 'shell' return enough output for deep path names
  arm64: dts: meson-gx: add ATF BL32 reserved-memory region
  netfilter: conntrack: don't refresh sctp entries in closed state
  irqchip/sifive-plic: Add missing thead,c900-plic match string
  ARM: OMAP2+: hwmod: Add of_node_put() before break
  KVM: x86/pmu: Use AMD64_RAW_EVENT_MASK for PERF_TYPE_RAW
  Drivers: hv: vmbus: Fix memory leak in vmbus_add_channel_kobj
  Drivers: hv: vmbus: Expose monitor data only when monitor pages are used
  mtd: rawnand: brcmnand: Fixed incorrect sub-page ECC status
  mtd: rawnand: brcmnand: Refactored code to introduce helper functions
  lib/iov_iter: initialize "flags" in new pipe_buffer
  i2c: brcmstb: fix support for DSL and CM variants
  dmaengine: sh: rcar-dmac: Check for error num after setting mask
  net: sched: limit TC_ACT_REPEAT loops
  EDAC: Fix calculation of returned address and next offset in edac_align_ptr()
  mtd: rawnand: qcom: Fix clock sequencing in qcom_nandc_probe()
  NFS: Do not report writeback errors in nfs_getattr()
  NFS: LOOKUP_DIRECTORY is also ok with symlinks
  block/wbt: fix negative inflight counter when remove scsi device
  ext4: check for out-of-order index extents in ext4_valid_extent_entries()
  powerpc/lib/sstep: fix 'ptesync' build error
  ASoC: ops: Fix stereo change notifications in snd_soc_put_volsw_range()
  ASoC: ops: Fix stereo change notifications in snd_soc_put_volsw()
  ALSA: hda: Fix missing codec probe on Shenker Dock 15
  ALSA: hda: Fix regression on forced probe mask option
  libsubcmd: Fix use-after-free for realloc(..., 0)
  bonding: fix data-races around agg_select_timer
  drop_monitor: fix data-race in dropmon_net_event / trace_napi_poll_hit
  ping: fix the dif and sdif check in ping_lookup
  net: ieee802154: ca8210: Fix lifs/sifs periods
  net: dsa: lan9303: fix reset on probe
  iwlwifi: pcie: gen2: fix locking when "HW not ready"
  iwlwifi: pcie: fix locking when "HW not ready"
  vsock: remove vsock from connected table when connect is interrupted by a signal
  mmc: block: fix read single on recovery logic
  taskstats: Cleanup the use of task->exit_code
  xfrm: Don't accidentally set RTO_ONLINK in decode_session4()
  drm/radeon: Fix backlight control on iMac 12,1
  iwlwifi: fix use-after-free
  Revert "module, async: async_synchronize_full() on module init iff async is used"
  nvme-rdma: fix possible use-after-free in transport error_recovery work
  nvme: fix a possible use-after-free in controller reset during load
  quota: make dquot_quota_sync return errors from ->sync_fs
  vfs: make freeze_super abort when sync_filesystem returns error
  ax25: improve the incomplete fix to avoid UAF and NPD bugs
  selftests/zram: Adapt the situation that /dev/zram0 is being used
  selftests/zram01.sh: Fix compression ratio calculation
  selftests/zram: Skip max_comp_streams interface on newer kernel
  net: ieee802154: at86rf230: Stop leaking skb's
  btrfs: send: in case of IO error log it
  parisc: Fix sglist access in ccio-dma.c
  parisc: Fix data TLB miss in sba_unmap_sg
  serial: parisc: GSC: fix build when IOSAPIC is not set
  net: usb: ax88179_178a: Fix out-of-bounds accesses in RX fixup
  Makefile.extrawarn: Move -Wunaligned-access to W=1
  Linux 4.19.230
  perf: Fix list corruption in perf_cgroup_switch()
  hwmon: (dell-smm) Speed up setting of fan speed
  seccomp: Invalidate seccomp mode to catch death failures
  USB: serial: cp210x: add CPI Bulk Coin Recycler id
  USB: serial: cp210x: add NCR Retail IO box id
  USB: serial: ch341: add support for GW Instek USB2.0-Serial devices
  USB: serial: option: add ZTE MF286D modem
  USB: serial: ftdi_sio: add support for Brainboxes US-159/235/320
  usb: gadget: rndis: check size of RNDIS_MSG_SET command
  USB: gadget: validate interface OS descriptor requests
  usb: dwc3: gadget: Prevent core from processing stale TRBs
  usb: ulpi: Call of_node_put correctly
  usb: ulpi: Move of_node_put to ulpi_dev_release
  n_tty: wake up poll(POLLRDNORM) on receiving data
  vt_ioctl: add array_index_nospec to VT_ACTIVATE
  vt_ioctl: fix array_index_nospec in vt_setactivate
  net: amd-xgbe: disable interrupts during pci removal
  tipc: rate limit warning for received illegal binding update
  veth: fix races around rq->rx_notify_masked
  net: fix a memleak when uncloning an skb dst and its metadata
  net: do not keep the dst cache when uncloning an skb dst and its metadata
  ipmr,ip6mr: acquire RTNL before calling ip[6]mr_free_table() on failure path
  bonding: pair enable_port with slave_arr_updates
  ixgbevf: Require large buffers for build_skb on 82599VF
  usb: f_fs: Fix use-after-free for epfile
  ARM: dts: imx6qdl-udoo: Properly describe the SD card detect
  staging: fbtft: Fix error path in fbtft_driver_module_init()
  ARM: dts: meson: Fix the UART compatible strings
  perf probe: Fix ppc64 'perf probe add events failed' case
  net: bridge: fix stale eth hdr pointer in br_dev_xmit
  ARM: dts: imx23-evk: Remove MX23_PAD_SSP1_DETECT from hog group
  bpf: Add kconfig knob for disabling unpriv bpf by default
  net: stmmac: dwmac-sun8i: use return val of readl_poll_timeout()
  usb: dwc2: gadget: don't try to disable ep0 in dwc2_hsotg_suspend
  scsi: target: iscsi: Make sure the np under each tpg is unique
  net: sched: Clarify error message when qdisc kind is unknown
  NFSv4 expose nfs_parse_server_name function
  NFSv4 remove zero number of fs_locations entries error check
  NFSv4.1: Fix uninitialised variable in devicenotify
  nfs: nfs4clinet: check the return value of kstrdup()
  NFSv4 only print the label when its queried
  NFSD: Fix offset type in I/O trace points
  NFSD: Clamp WRITE offsets
  NFS: Fix initialisation of nfs_client cl_flags field
  net: phy: marvell: Fix MDI-x polarity setting in 88e1118-compatible PHYs
  mmc: sdhci-of-esdhc: Check for error num after setting mask
  ima: Allow template selection with ima_template[_fmt]= after ima_hash=
  ima: Remove ima_policy file before directory
  integrity: check the return value of audit_log_start()
  FROMGIT: f2fs: avoid EINVAL by SBI_NEED_FSCK when pinning a file
  Revert "tracefs: Have tracefs directories not set OTH permission bits by default"
  ANDROID: GKI: Enable CONFIG_SERIAL_8250_RUNTIME_UARTS=0
  Linux 4.19.229
  tipc: improve size validations for received domain records
  moxart: fix potential use-after-free on remove path
  cgroup-v1: Require capabilities to set release_agent
  Linux 4.19.228
  ext4: fix error handling in ext4_restore_inline_data()
  EDAC/xgene: Fix deferred probing
  EDAC/altera: Fix deferred probing
  rtc: cmos: Evaluate century appropriate
  selftests: futex: Use variable MAKE instead of make
  nfsd: nfsd4_setclientid_confirm mistakenly expires confirmed client.
  scsi: bnx2fc: Make bnx2fc_recv_frame() mp safe
  ASoC: max9759: fix underflow in speaker_gain_control_put()
  ASoC: cpcap: Check for NULL pointer after calling of_get_child_by_name
  ASoC: fsl: Add missing error handling in pcm030_fabric_probe
  drm/i915/overlay: Prevent divide by zero bugs in scaling
  net: stmmac: ensure PTP time register reads are consistent
  net: macsec: Verify that send_sci is on when setting Tx sci explicitly
  net: ieee802154: Return meaningful error codes from the netlink helpers
  net: ieee802154: ca8210: Stop leaking skb's
  net: ieee802154: mcr20a: Fix lifs/sifs periods
  net: ieee802154: hwsim: Ensure proper channel selection at probe time
  spi: meson-spicc: add IRQ check in meson_spicc_probe
  spi: mediatek: Avoid NULL pointer crash in interrupt
  spi: bcm-qspi: check for valid cs before applying chip select
  iommu/amd: Fix loop timeout issue in iommu_ga_log_enable()
  iommu/vt-d: Fix potential memory leak in intel_setup_irq_remapping()
  RDMA/mlx4: Don't continue event handler after memory allocation failure
  Revert "ASoC: mediatek: Check for error clk pointer"
  block: bio-integrity: Advance seed correctly for larger interval sizes
  drm/nouveau: fix off by one in BIOS boundary checking
  ALSA: hda/realtek: Fix silent output on Gigabyte X570 Aorus Xtreme after reboot from Windows
  ALSA: hda/realtek: Fix silent output on Gigabyte X570S Aorus Master (newer chipset)
  ALSA: hda/realtek: Add missing fixup-model entry for Gigabyte X570 ALC1220 quirks
  ASoC: ops: Reject out of bounds values in snd_soc_put_xr_sx()
  ASoC: ops: Reject out of bounds values in snd_soc_put_volsw_sx()
  ASoC: ops: Reject out of bounds values in snd_soc_put_volsw()
  audit: improve audit queue handling when "audit=1" on cmdline
  af_packet: fix data-race in packet_setsockopt / packet_setsockopt
  rtnetlink: make sure to refresh master_dev/m_ops in __rtnl_newlink()
  net: amd-xgbe: Fix skb data length underflow
  net: amd-xgbe: ensure to reset the tx_timer_active flag
  ipheth: fix EOVERFLOW in ipheth_rcvbulk_callback
  tcp: fix possible socket leaks in internal pacing mode
  netfilter: nat: limit port clash resolution attempts
  netfilter: nat: remove l4 protocol port rovers
  ipv4: tcp: send zero IPID in SYNACK messages
  ipv4: raw: lock the socket in raw_bind()
  yam: fix a memory leak in yam_siocdevprivate()
  ibmvnic: don't spin in tasklet
  ibmvnic: init ->running_cap_crqs early
  phylib: fix potential use-after-free
  NFS: Ensure the server has an up to date ctime before renaming
  NFS: Ensure the server has an up to date ctime before hardlinking
  ipv6: annotate accesses to fn->fn_sernum
  drm/msm/dsi: invalid parameter check in msm_dsi_phy_enable
  drm/msm: Fix wrong size calculation
  net-procfs: show net devices bound packet types
  NFSv4: nfs_atomic_open() can race when looking up a non-regular file
  NFSv4: Handle case where the lookup of a directory fails
  hwmon: (lm90) Reduce maximum conversion rate for G781
  ipv4: avoid using shared IP generator for connected sockets
  ping: fix the sk_bound_dev_if match in ping_lookup
  net: fix information leakage in /proc/net/ptype
  ipv6_tunnel: Rate limit warning messages
  scsi: bnx2fc: Flush destroy_work queue before calling bnx2fc_interface_put()
  rpmsg: char: Fix race between the release of rpmsg_eptdev and cdev
  rpmsg: char: Fix race between the release of rpmsg_ctrldev and cdev
  i40e: fix unsigned stat widths
  i40e: Fix queues reservation for XDP
  i40e: Fix issue when maximum queues is exceeded
  i40e: Increase delay to 1 s after global EMP reset
  powerpc/32: Fix boot failure with GCC latent entropy plugin
  net: sfp: ignore disabled SFP node
  usb: typec: tcpm: Do not disconnect while receiving VBUS off
  USB: core: Fix hang in usb_kill_urb by adding memory barriers
  usb: gadget: f_sourcesink: Fix isoc transfer for USB_SPEED_SUPER_PLUS
  usb: common: ulpi: Fix crash in ulpi_match()
  usb-storage: Add unusual-devs entry for VL817 USB-SATA bridge
  tty: Add support for Brainboxes UC cards.
  tty: n_gsm: fix SW flow control encoding/handling
  serial: stm32: fix software flow control transfer
  serial: 8250: of: Fix mapped region size when using reg-offset property
  netfilter: nft_payload: do not update layer 4 checksum when mangling fragments
  drm/etnaviv: relax submit size limits
  PM: wakeup: simplify the output logic of pm_show_wakelocks()
  udf: Fix NULL ptr deref when converting from inline format
  udf: Restore i_lenAlloc when inode expansion fails
  scsi: zfcp: Fix failed recovery on gone remote port with non-NPIV FCP devices
  s390/hypfs: include z/VM guests with access control group set
  Bluetooth: refactor malicious adv data check
  ANDROID: Increase x86 cmdline size to 4k

Change-Id: Icc3c578b223a53feb469666071df2e1715fa8698
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>

Conflicts:
	drivers/usb/dwc3/gadget.c
	drivers/usb/gadget/function/f_fs.c
2022-03-08 06:44:12 +05:30

/*
* Block device elevator/IO-scheduler.
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*
* 30042000 Jens Axboe <axboe@kernel.dk> :
*
* Split the elevator a bit so that it is possible to choose a different
* one or even write a new "plug in". There are three pieces:
* - elevator_fn, inserts a new request in the queue list
* - elevator_merge_fn, decides whether a new buffer can be merged with
* an existing request
* - elevator_dequeue_fn, called when a request is taken off the active list
*
* 20082000 Dave Jones <davej@suse.de> :
* Removed tests for max-bomb-segments, which was breaking elvtune
* when run without -bN
*
* Jens:
* - Rework again to work with bio instead of buffer_heads
* - dropped the bi_dev comparisons; partition handling is correct now
* - completely modularize elevator setup and teardown
*
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <trace/events/block.h>
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
* Merge hash support: requests are keyed by the sector at which they end.
*/
#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
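/*
* Lookup sketch (illustrative, not from the original source): the hash key
* is the first sector past the end of a request, so a back-merge candidate
* for a bio is found by probing with the bio's start sector:
*
*	rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
*
* If this returns a request, rq ends exactly where bio begins; this is the
* lookup elv_merge() performs below.
*/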
/*
* Query the io scheduler to see if bio, issued by the current process,
* may be merged with rq.
*/
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
if (e->uses_mq && e->type->ops.mq.allow_merge)
return e->type->ops.mq.allow_merge(q, rq, bio);
else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
return 1;
}
/*
* can we safely merge with this request?
*/
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
if (!blk_rq_merge_ok(rq, bio))
return false;
if (!elv_iosched_allow_bio_merge(rq, bio))
return false;
return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);
static bool elevator_match(const struct elevator_type *e, const char *name)
{
if (!strcmp(e->elevator_name, name))
return true;
if (e->elevator_alias && !strcmp(e->elevator_alias, name))
return true;
return false;
}
/*
* Return the scheduler matching 'name' and the requested 'mq' capability.
*/
static struct elevator_type *elevator_find(const char *name, bool mq)
{
struct elevator_type *e;
list_for_each_entry(e, &elv_list, list) {
if (elevator_match(e, name) && (mq == e->uses_mq))
return e;
}
return NULL;
}
static void elevator_put(struct elevator_type *e)
{
module_put(e->elevator_owner);
}
static struct elevator_type *elevator_get(struct request_queue *q,
const char *name, bool try_loading)
{
struct elevator_type *e;
spin_lock(&elv_list_lock);
e = elevator_find(name, q->mq_ops != NULL);
if (!e && try_loading) {
spin_unlock(&elv_list_lock);
request_module("%s-iosched", name);
spin_lock(&elv_list_lock);
e = elevator_find(name, q->mq_ops != NULL);
}
if (e && !try_module_get(e->elevator_owner))
e = NULL;
spin_unlock(&elv_list_lock);
return e;
}
static char chosen_elevator[ELV_NAME_MAX];
static int __init elevator_setup(char *str)
{
/*
* Be backwards-compatible with previous kernels, so users
* won't get the wrong elevator.
*/
strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
return 1;
}
__setup("elevator=", elevator_setup);
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
struct elevator_type *e;
if (!chosen_elevator[0])
return;
/*
* The boot parameter is deprecated and has never been supported for MQ.
* Only look for non-mq schedulers from here.
*/
spin_lock(&elv_list_lock);
e = elevator_find(chosen_elevator, false);
spin_unlock(&elv_list_lock);
if (!e)
request_module("%s-iosched", chosen_elevator);
}
static struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
struct elevator_type *e)
{
struct elevator_queue *eq;
eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
if (unlikely(!eq))
return NULL;
eq->type = e;
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
hash_init(eq->hash);
eq->uses_mq = e->uses_mq;
return eq;
}
EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
{
struct elevator_queue *e;
e = container_of(kobj, struct elevator_queue, kobj);
elevator_put(e->type);
kfree(e);
}
/*
* For non-mq devices, use the default elevator from the boot parameter or,
* failing that, the config option.  Don't try to load modules, as we could
* be running off async and request_module() isn't allowed from async.
*/
int elevator_init(struct request_queue *q)
{
struct elevator_type *e = NULL;
int err = 0;
/*
* q->sysfs_lock must be held to provide mutual exclusion between
* elevator_switch() and here.
*/
mutex_lock(&q->sysfs_lock);
if (unlikely(q->elevator))
goto out_unlock;
if (*chosen_elevator) {
e = elevator_get(q, chosen_elevator, false);
if (!e)
printk(KERN_ERR "I/O scheduler %s not found\n",
chosen_elevator);
}
if (!e)
e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
if (!e) {
printk(KERN_ERR
"Default I/O scheduler not found. Using noop.\n");
e = elevator_get(q, "noop", false);
}
err = e->ops.sq.elevator_init_fn(q, e);
if (err)
elevator_put(e);
out_unlock:
mutex_unlock(&q->sysfs_lock);
return err;
}
void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->uses_mq && e->type->ops.mq.exit_sched)
blk_mq_exit_sched(q, e);
else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
e->type->ops.sq.elevator_exit_fn(e);
mutex_unlock(&e->sysfs_lock);
kobject_put(&e->kobj);
}
static inline void __elv_rqhash_del(struct request *rq)
{
hash_del(&rq->hash);
rq->rq_flags &= ~RQF_HASHED;
}
void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
if (ELV_ON_HASH(rq))
__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);
void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
BUG_ON(ELV_ON_HASH(rq));
hash_add(e->hash, &rq->hash, rq_hash_key(rq));
rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
__elv_rqhash_del(rq);
elv_rqhash_add(q, rq);
}
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
struct elevator_queue *e = q->elevator;
struct hlist_node *next;
struct request *rq;
hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
BUG_ON(!ELV_ON_HASH(rq));
if (unlikely(!rq_mergeable(rq))) {
__elv_rqhash_del(rq);
continue;
}
if (rq_hash_key(rq) == offset)
return rq;
}
return NULL;
}
/*
* RB-tree support functions for inserting/lookup/removal of requests
* in a sorted RB tree.
*/
void elv_rb_add(struct rb_root *root, struct request *rq)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct request *__rq;
while (*p) {
parent = *p;
__rq = rb_entry(parent, struct request, rb_node);
if (blk_rq_pos(rq) < blk_rq_pos(__rq))
p = &(*p)->rb_left;
else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
p = &(*p)->rb_right;
}
rb_link_node(&rq->rb_node, parent, p);
rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);
void elv_rb_del(struct rb_root *root, struct request *rq)
{
BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
rb_erase(&rq->rb_node, root);
RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
struct rb_node *n = root->rb_node;
struct request *rq;
while (n) {
rq = rb_entry(n, struct request, rb_node);
if (sector < blk_rq_pos(rq))
n = n->rb_left;
else if (sector > blk_rq_pos(rq))
n = n->rb_right;
else
return rq;
}
return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
/*
* Insert rq into the dispatch queue of q.  Queue lock must be held on
* entry.  rq is sorted into the dispatch queue.  To be used by specific
* elevators.
*/
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
sector_t boundary;
struct list_head *entry;
if (q->last_merge == rq)
q->last_merge = NULL;
elv_rqhash_del(q, rq);
q->nr_sorted--;
boundary = q->end_sector;
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
if (req_op(rq) != req_op(pos))
break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
break;
if (blk_rq_pos(rq) >= boundary) {
if (blk_rq_pos(pos) < boundary)
continue;
} else {
if (blk_rq_pos(pos) >= boundary)
break;
}
if (blk_rq_pos(rq) >= blk_rq_pos(pos))
break;
}
list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
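/*
* Worked example (illustrative): with boundary = 1000 and a dispatch list
* ordered 1200, 1500, 100, 400 (requests at or above the boundary sweep
* first, then the list wraps to low sectors), a new rq at sector 1300 is
* placed between 1200 and 1500, while one at sector 200 lands between 100
* and 400 - a one-way elevator continuing from the last dispatch position.
*/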
/*
* Insert rq into dispatch queue of q. Queue lock must be held on
* entry. rq is added to the back of the dispatch queue. To be used by
* specific elevators.
*/
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
if (q->last_merge == rq)
q->last_merge = NULL;
elv_rqhash_del(q, rq);
q->nr_sorted--;
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct elevator_queue *e = q->elevator;
struct request *__rq;
/*
* Levels of merges:
* nomerges: No merges at all attempted
* noxmerges: Only simple one-hit cache try
* merges: All merge tries attempted
*/
if (blk_queue_nomerges(q) || !bio_mergeable(bio))
return ELEVATOR_NO_MERGE;
/*
* First try one-hit cache.
*/
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
enum elv_merge ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;
}
}
if (blk_queue_noxmerges(q))
return ELEVATOR_NO_MERGE;
/*
* See if our hash lookup can find a potential backmerge.
*/
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
}
if (e->uses_mq && e->type->ops.mq.request_merge)
return e->type->ops.mq.request_merge(q, req, bio);
else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
return e->type->ops.sq.elevator_merge_fn(q, req, bio);
return ELEVATOR_NO_MERGE;
}
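/*
* Userspace control sketch (assumed device name): the merge levels above map
* onto the queue's "nomerges" sysfs attribute, where 0 allows all merge
* attempts, 1 keeps only the one-hit last_merge cache (noxmerges), and 2
* disables merging entirely (nomerges), e.g.
*
*	# echo 1 > /sys/block/sda/queue/nomerges
*/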
/*
* Attempt to do an insertion back merge. Only check for the case where
* we can append 'rq' to an existing request, so we can throw 'rq' away
* afterwards.
*
* Returns true if we merged, false otherwise
*/
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
struct request *__rq;
bool ret;
if (blk_queue_nomerges(q))
return false;
/*
* First try one-hit cache.
*/
if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
return true;
if (blk_queue_noxmerges(q))
return false;
ret = false;
/*
* See if our hash lookup can find a potential backmerge.
*/
while (1) {
__rq = elv_rqhash_find(q, blk_rq_pos(rq));
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
break;
/* The merged request could be merged with others, try again */
ret = true;
rq = __rq;
}
return ret;
}
void elv_merged_request(struct request_queue *q, struct request *rq,
enum elv_merge type)
{
struct elevator_queue *e = q->elevator;
if (e->uses_mq && e->type->ops.mq.request_merged)
e->type->ops.mq.request_merged(q, rq, type);
else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
e->type->ops.sq.elevator_merged_fn(q, rq, type);
if (type == ELEVATOR_BACK_MERGE)
elv_rqhash_reposition(q, rq);
q->last_merge = rq;
}
void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct elevator_queue *e = q->elevator;
bool next_sorted = false;
if (e->uses_mq && e->type->ops.mq.requests_merged)
e->type->ops.mq.requests_merged(q, rq, next);
else if (e->type->ops.sq.elevator_merge_req_fn) {
next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
if (next_sorted)
e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
}
elv_rqhash_reposition(q, rq);
if (next_sorted) {
elv_rqhash_del(q, next);
q->nr_sorted--;
}
q->last_merge = rq;
}
void elv_bio_merged(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct elevator_queue *e = q->elevator;
if (WARN_ON_ONCE(e->uses_mq))
return;
if (e->type->ops.sq.elevator_bio_merged_fn)
e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}
#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
if (rq->q->dev && !(rq->rq_flags & RQF_PM) &&
(rq->rq_flags & (RQF_PM_ADDED | RQF_FLUSH_SEQ))) {
rq->rq_flags &= ~RQF_PM_ADDED;
rq->q->nr_pending--;
}
}
static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
if (q->dev && !(rq->rq_flags & RQF_PM)) {
rq->rq_flags |= RQF_PM_ADDED;
if (q->nr_pending++ == 0 &&
(q->rpm_status == RPM_SUSPENDED ||
q->rpm_status == RPM_SUSPENDING))
pm_request_resume(q->dev);
}
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
struct request *rq)
{
}
#endif
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
* it already went through dequeue, we need to decrement the
* in_flight count again
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
if (rq->rq_flags & RQF_SORTED)
elv_deactivate_rq(q, rq);
}
rq->rq_flags &= ~RQF_STARTED;
blk_pm_requeue_request(rq);
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
void elv_drain_elevator(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
static int printed;
if (WARN_ON_ONCE(e->uses_mq))
return;
lockdep_assert_held(q->queue_lock);
while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
;
if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
printk(KERN_ERR "%s: forced dispatching is broken "
"(nr_sorted=%u), please report this\n",
q->elevator->type->elevator_name, q->nr_sorted);
}
}
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
blk_pm_add_request(q, rq);
rq->q = q;
if (rq->rq_flags & RQF_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (!blk_rq_is_passthrough(rq)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
(where == ELEVATOR_INSERT_SORT ||
where == ELEVATOR_INSERT_SORT_MERGE))
where = ELEVATOR_INSERT_BACK;
switch (where) {
case ELEVATOR_INSERT_REQUEUE:
case ELEVATOR_INSERT_FRONT:
rq->rq_flags |= RQF_SOFTBARRIER;
list_add(&rq->queuelist, &q->queue_head);
break;
case ELEVATOR_INSERT_BACK:
rq->rq_flags |= RQF_SOFTBARRIER;
elv_drain_elevator(q);
list_add_tail(&rq->queuelist, &q->queue_head);
/*
* We kick the queue here for the following reasons.
* - The elevator might have returned NULL previously
* to delay requests and is returning them now.  As the
* queue wasn't empty before this request, ll_rw_blk
* won't run the queue on return, resulting in a hang.
* - Usually, back-inserted requests won't be merged
* with anything.  There's no point in delaying queue
* processing.
*/
__blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT_MERGE:
/*
* If we succeed in merging this request with one in the
* queue already, we are done - rq has now been freed,
* so no need to do anything further.
*/
if (elv_attempt_insert_merge(q, rq))
break;
/* fall through */
case ELEVATOR_INSERT_SORT:
BUG_ON(blk_rq_is_passthrough(rq));
rq->rq_flags |= RQF_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
elv_rqhash_add(q, rq);
if (!q->last_merge)
q->last_merge = rq;
}
/*
* Some ioscheds (cfq) run q->request_fn directly, so
* rq cannot be accessed after calling
* elevator_add_req_fn.
*/
q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
break;
case ELEVATOR_INSERT_FLUSH:
rq->rq_flags |= RQF_SOFTBARRIER;
blk_insert_flush(rq);
break;
default:
printk(KERN_ERR "%s: bad insertion point %d\n",
__func__, where);
BUG();
}
}
EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__elv_add_request(q, rq, where);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e->uses_mq && e->type->ops.mq.next_request)
return e->type->ops.mq.next_request(q, rq);
else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
return e->type->ops.sq.elevator_latter_req_fn(q, rq);
return NULL;
}
struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e->uses_mq && e->type->ops.mq.former_request)
return e->type->ops.mq.former_request(q, rq);
if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
return e->type->ops.sq.elevator_former_req_fn(q, rq);
return NULL;
}
int elv_set_request(struct request_queue *q, struct request *rq,
struct bio *bio, gfp_t gfp_mask)
{
struct elevator_queue *e = q->elevator;
if (WARN_ON_ONCE(e->uses_mq))
return 0;
if (e->type->ops.sq.elevator_set_req_fn)
return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
return 0;
}
void elv_put_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (WARN_ON_ONCE(e->uses_mq))
return;
if (e->type->ops.sq.elevator_put_req_fn)
e->type->ops.sq.elevator_put_req_fn(rq);
}
int elv_may_queue(struct request_queue *q, unsigned int op)
{
struct elevator_queue *e = q->elevator;
if (WARN_ON_ONCE(e->uses_mq))
return 0;
if (e->type->ops.sq.elevator_may_queue_fn)
return e->type->ops.sq.elevator_may_queue_fn(q, op);
return ELV_MQUEUE_MAY;
}
void elv_completed_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (WARN_ON_ONCE(e->uses_mq))
return;
/*
* request is released from the driver, io must be done
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
if ((rq->rq_flags & RQF_SORTED) &&
e->type->ops.sq.elevator_completed_req_fn)
e->type->ops.sq.elevator_completed_req_fn(q, rq);
}
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
ssize_t error;
if (!entry->show)
return -EIO;
e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
error = e->type ? entry->show(e, page) : -ENOENT;
mutex_unlock(&e->sysfs_lock);
return error;
}
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
ssize_t error;
if (!entry->store)
return -EIO;
e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
error = e->type ? entry->store(e, page, length) : -ENOENT;
mutex_unlock(&e->sysfs_lock);
return error;
}
static const struct sysfs_ops elv_sysfs_ops = {
.show = elv_attr_show,
.store = elv_attr_store,
};
static struct kobj_type elv_ktype = {
.sysfs_ops = &elv_sysfs_ops,
.release = elevator_release,
};
int elv_register_queue(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
int error;
lockdep_assert_held(&q->sysfs_lock);
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
if (!error) {
struct elv_fs_entry *attr = e->type->elevator_attrs;
if (attr) {
while (attr->attr.name) {
if (sysfs_create_file(&e->kobj, &attr->attr))
break;
attr++;
}
}
kobject_uevent(&e->kobj, KOBJ_ADD);
e->registered = 1;
if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
e->type->ops.sq.elevator_registered_fn(q);
else if (e->uses_mq && e->type->ops.mq.elevator_registered_fn)
e->type->ops.mq.elevator_registered_fn(q);
}
return error;
}
void elv_unregister_queue(struct request_queue *q)
{
lockdep_assert_held(&q->sysfs_lock);
if (q) {
struct elevator_queue *e = q->elevator;
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
e->registered = 0;
}
}
int elv_register(struct elevator_type *e)
{
char *def = "";
/* create icq_cache if requested */
if (e->icq_size) {
if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
WARN_ON(e->icq_align < __alignof__(struct io_cq)))
return -EINVAL;
snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
"%s_io_cq", e->elevator_name);
e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
e->icq_align, 0, NULL);
if (!e->icq_cache)
return -ENOMEM;
}
/* register, don't allow duplicate names */
spin_lock(&elv_list_lock);
if (elevator_find(e->elevator_name, e->uses_mq)) {
spin_unlock(&elv_list_lock);
kmem_cache_destroy(e->icq_cache);
return -EBUSY;
}
list_add_tail(&e->list, &elv_list);
spin_unlock(&elv_list_lock);
/* print pretty message */
if (elevator_match(e, chosen_elevator) ||
(!*chosen_elevator &&
elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
def = " (default)";
printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
def);
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
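/*
* Registration sketch (modeled on the in-tree noop scheduler; the "foo"
* names are hypothetical): a legacy scheduler fills in an elevator_type and
* registers it from module init:
*
*	static struct elevator_type elevator_foo = {
*		.ops.sq = {
*			.elevator_init_fn	= foo_init_queue,
*			.elevator_exit_fn	= foo_exit_queue,
*			.elevator_dispatch_fn	= foo_dispatch,
*			.elevator_add_req_fn	= foo_add_request,
*		},
*		.elevator_name	= "foo",
*		.elevator_owner	= THIS_MODULE,
*	};
*
*	static int __init foo_iosched_init(void)
*	{
*		return elv_register(&elevator_foo);
*	}
*
* A blk-mq scheduler sets uses_mq and fills ops.mq instead.
*/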
void elv_unregister(struct elevator_type *e)
{
/* unregister */
spin_lock(&elv_list_lock);
list_del_init(&e->list);
spin_unlock(&elv_list_lock);
/*
* Destroy icq_cache if it exists. icq's are RCU managed. Make
* sure all RCU operations are complete before proceeding.
*/
if (e->icq_cache) {
rcu_barrier();
kmem_cache_destroy(e->icq_cache);
e->icq_cache = NULL;
}
}
EXPORT_SYMBOL_GPL(elv_unregister);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e)
{
int ret;
lockdep_assert_held(&q->sysfs_lock);
if (q->elevator) {
if (q->elevator->registered)
elv_unregister_queue(q);
ioc_clear_queue(q);
elevator_exit(q, q->elevator);
}
ret = blk_mq_init_sched(q, new_e);
if (ret)
goto out;
if (new_e) {
ret = elv_register_queue(q);
if (ret) {
elevator_exit(q, q->elevator);
goto out;
}
}
if (new_e)
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
else
blk_add_trace_msg(q, "elv switch: none");
out:
return ret;
}
/*
* For single hardware queue blk-mq devices, default to BFQ when it is
* enabled in the config, otherwise to mq-deadline.  If the chosen scheduler
* is unavailable, or the device exposes multiple queues, default to "none".
*/
int elevator_init_mq(struct request_queue *q)
{
struct elevator_type *e;
int err = 0;
if (q->nr_hw_queues != 1)
return 0;
WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags));
if (unlikely(q->elevator))
goto out;
if (IS_ENABLED(CONFIG_IOSCHED_BFQ)) {
e = elevator_get(q, "bfq", false);
if (!e)
goto out;
} else {
e = elevator_get(q, "mq-deadline", false);
if (!e)
goto out;
}
err = blk_mq_init_sched(q, e);
if (err)
elevator_put(e);
out:
return err;
}
/*
* Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
* we don't free the old io scheduler before we have allocated what we need
* for the new one.  This way we have a chance of going back to the old one
* if the new one fails init for some reason.
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
struct elevator_queue *old = q->elevator;
bool old_registered = false;
int err;
lockdep_assert_held(&q->sysfs_lock);
if (q->mq_ops) {
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
err = elevator_switch_mq(q, new_e);
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return err;
}
/*
* Turn on BYPASS and drain all requests w/ elevator private data.
* Block layer doesn't call into a quiesced elevator - all requests
* are directly put on the dispatch list without elevator data
* using INSERT_BACK. All requests have SOFTBARRIER set and no
* merge happens either.
*/
if (old) {
old_registered = old->registered;
blk_queue_bypass_start(q);
/* unregister and clear all auxiliary data of the old elevator */
if (old_registered)
elv_unregister_queue(q);
ioc_clear_queue(q);
}
/* allocate, init and register new elevator */
err = new_e->ops.sq.elevator_init_fn(q, new_e);
if (err)
goto fail_init;
err = elv_register_queue(q);
if (err)
goto fail_register;
/* done, kill the old one and finish */
if (old) {
elevator_exit(q, old);
blk_queue_bypass_end(q);
}
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
elevator_exit(q, q->elevator);
fail_init:
/* switch failed, restore and re-register old elevator */
if (old) {
q->elevator = old;
elv_register_queue(q);
blk_queue_bypass_end(q);
}
return err;
}
/*
* Switch this queue to the given IO scheduler.
*/
static int __elevator_change(struct request_queue *q, const char *name)
{
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
/* Make sure queue is not in the middle of being removed */
if (!blk_queue_registered(q))
return -ENOENT;
/*
* Special case for mq, turn off scheduling
*/
if (q->mq_ops && !strncmp(name, "none", 4))
return elevator_switch(q, NULL);
strlcpy(elevator_name, name, sizeof(elevator_name));
e = elevator_get(q, strstrip(elevator_name), true);
if (!e)
return -EINVAL;
if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
elevator_put(e);
return 0;
}
return elevator_switch(q, e);
}
static inline bool elv_support_iosched(struct request_queue *q)
{
if (q->mq_ops && q->tag_set && (q->tag_set->flags &
BLK_MQ_F_NO_SCHED))
return false;
return true;
}
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
size_t count)
{
int ret;
if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
return count;
ret = __elevator_change(q, name);
if (!ret)
return count;
return ret;
}
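/*
* Usage sketch (assumed device name): these handlers back the per-queue
* "scheduler" sysfs attribute, so inspecting and switching the elevator
* from userspace looks like
*
*	# cat /sys/block/sda/queue/scheduler
*	[mq-deadline] kyber bfq none
*	# echo bfq > /sys/block/sda/queue/scheduler
*
* The store path returns -EINVAL when the named scheduler cannot be found
* or loaded (see __elevator_change() above).
*/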
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
struct elevator_queue *e = q->elevator;
struct elevator_type *elv = NULL;
struct elevator_type *__e;
bool uses_mq = q->mq_ops != NULL;
int len = 0;
if (!queue_is_rq_based(q))
return sprintf(name, "none\n");
if (!q->elevator)
len += sprintf(name+len, "[none] ");
else
elv = e->type;
spin_lock(&elv_list_lock);
list_for_each_entry(__e, &elv_list, list) {
if (elv && elevator_match(elv, __e->elevator_name) &&
(__e->uses_mq == uses_mq)) {
len += sprintf(name+len, "[%s] ", elv->elevator_name);
continue;
}
if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
len += sprintf(name+len, "%s ", __e->elevator_name);
else if (!__e->uses_mq && !q->mq_ops)
len += sprintf(name+len, "%s ", __e->elevator_name);
}
spin_unlock(&elv_list_lock);
if (q->mq_ops && q->elevator)
len += sprintf(name+len, "none");
len += sprintf(len+name, "\n");
return len;
}
struct request *elv_rb_former_request(struct request_queue *q,
struct request *rq)
{
struct rb_node *rbprev = rb_prev(&rq->rb_node);
if (rbprev)
return rb_entry_rq(rbprev);
return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);
struct request *elv_rb_latter_request(struct request_queue *q,
struct request *rq)
{
struct rb_node *rbnext = rb_next(&rq->rb_node);
if (rbnext)
return rb_entry_rq(rbnext);
return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);