android_kernel_xiaomi_sm7250/block/blk-iolatency.c
Greg Kroah-Hartman a1757f43e8 This is the 4.19.247 stable release

Merge 4.19.247 into android-4.19-stable

Changes in 4.19.247
	binfmt_flat: do not stop relocating GOT entries prematurely on riscv
	ALSA: hda/realtek - Fix microphone noise on ASUS TUF B550M-PLUS
	USB: serial: option: add Quectel BG95 modem
	USB: new quirk for Dell Gen 2 devices
	ptrace/xtensa: Replace PT_SINGLESTEP with TIF_SINGLESTEP
	ptrace: Reimplement PTRACE_KILL by always sending SIGKILL
	btrfs: add "0x" prefix for unsupported optional features
	btrfs: repair super block num_devices automatically
	drm/virtio: fix NULL pointer dereference in virtio_gpu_conn_get_modes
	mwifiex: add mutex lock for call in mwifiex_dfs_chan_sw_work_queue
	b43legacy: Fix assigning negative value to unsigned variable
	b43: Fix assigning negative value to unsigned variable
	ipw2x00: Fix potential NULL dereference in libipw_xmit()
	ipv6: fix locking issues with loops over idev->addr_list
	fbcon: Consistently protect deferred_takeover with console_lock()
	ACPICA: Avoid cache flush inside virtual machines
	ALSA: jack: Access input_dev under mutex
	drm/amd/pm: fix double free in si_parse_power_table()
	ath9k: fix QCA9561 PA bias level
	media: venus: hfi: avoid null dereference in deinit
	media: pci: cx23885: Fix the error handling in cx23885_initdev()
	media: cx25821: Fix the warning when removing the module
	md/bitmap: don't set sb values if can't pass sanity check
	scsi: megaraid: Fix error check return value of register_chrdev()
	drm/plane: Move range check for format_count earlier
	drm/amd/pm: fix the compile warning
	ipv6: Don't send rs packets to the interface of ARPHRD_TUNNEL
	ASoC: dapm: Don't fold register value changes into notifications
	mlxsw: spectrum_dcb: Do not warn about priority changes
	ASoC: tscs454: Add endianness flag in snd_soc_component_driver
	s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES
	dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATIOMIC
	ipmi:ssif: Check for NULL msg when handling events and messages
	rtlwifi: Use pr_warn instead of WARN_ONCE
	media: cec-adap.c: fix is_configuring state
	openrisc: start CPU timer early in boot
	nvme-pci: fix a NULL pointer dereference in nvme_alloc_admin_tags
	ASoC: rt5645: Fix errorenous cleanup order
	net: phy: micrel: Allow probing without .driver_data
	media: exynos4-is: Fix compile warning
	hwmon: Make chip parameter for with_info API mandatory
	rxrpc: Return an error to sendmsg if call failed
	eth: tg3: silence the GCC 12 array-bounds warning
	ARM: dts: ox820: align interrupt controller node name with dtschema
	PM / devfreq: rk3399_dmc: Disable edev on remove()
	fs: jfs: fix possible NULL pointer dereference in dbFree()
	ARM: OMAP1: clock: Fix UART rate reporting algorithm
	fat: add ratelimit to fat*_ent_bread()
	ARM: versatile: Add missing of_node_put in dcscb_init
	ARM: dts: exynos: add atmel,24c128 fallback to Samsung EEPROM
	ARM: hisi: Add missing of_node_put after of_find_compatible_node
	PCI: Avoid pci_dev_lock() AB/BA deadlock with sriov_numvfs_store()
	tracing: incorrect isolate_mote_t cast in mm_vmscan_lru_isolate
	powerpc/xics: fix refcount leak in icp_opal_init()
	macintosh/via-pmu: Fix build failure when CONFIG_INPUT is disabled
	RDMA/hfi1: Prevent panic when SDMA is disabled
	drm: fix EDID struct for old ARM OABI format
	ath9k: fix ar9003_get_eepmisc
	drm/edid: fix invalid EDID extension block filtering
	drm/bridge: adv7511: clean up CEC adapter when probe fails
	ASoC: mediatek: Fix error handling in mt8173_max98090_dev_probe
	ASoC: mediatek: Fix missing of_node_put in mt2701_wm8960_machine_probe
	x86/delay: Fix the wrong asm constraint in delay_loop()
	drm/mediatek: Fix mtk_cec_mask()
	drm/vc4: txp: Don't set TXP_VSTART_AT_EOF
	drm/vc4: txp: Force alpha to be 0xff if it's disabled
	nl80211: show SSID for P2P_GO interfaces
	spi: spi-ti-qspi: Fix return value handling of wait_for_completion_timeout
	NFC: NULL out the dev->rfkill to prevent UAF
	efi: Add missing prototype for efi_capsule_setup_info
	HID: hid-led: fix maximum brightness for Dream Cheeky
	HID: elan: Fix potential double free in elan_input_configured
	spi: img-spfi: Fix pm_runtime_get_sync() error checking
	ath9k_htc: fix potential out of bounds access with invalid rxstatus->rs_keyix
	inotify: show inotify mask flags in proc fdinfo
	fsnotify: fix wrong lockdep annotations
	of: overlay: do not break notify on NOTIFY_{OK|STOP}
	scsi: ufs: core: Exclude UECxx from SFR dump list
	x86/pm: Fix false positive kmemleak report in msr_build_context()
	x86/speculation: Add missing prototype for unpriv_ebpf_notify()
	drm/msm/disp/dpu1: set vbif hw config to NULL to avoid use after memory free during pm runtime resume
	drm/msm/dsi: fix error checks and return values for DSI xmit functions
	drm/msm/hdmi: check return value after calling platform_get_resource_byname()
	drm/rockchip: vop: fix possible null-ptr-deref in vop_bind()
	x86: Fix return value of __setup handlers
	irqchip/aspeed-i2c-ic: Fix irq_of_parse_and_map() return value
	x86/mm: Cleanup the control_va_addr_alignment() __setup handler
	drm/msm/mdp5: Return error code in mdp5_pipe_release when deadlock is detected
	drm/msm/mdp5: Return error code in mdp5_mixer_release when deadlock is detected
	drm/msm: return an error pointer in msm_gem_prime_get_sg_table()
	media: uvcvideo: Fix missing check to determine if element is found in list
	perf/amd/ibs: Use interrupt regs ip for stack unwinding
	ASoC: mxs-saif: Fix refcount leak in mxs_saif_probe
	regulator: pfuze100: Fix refcount leak in pfuze_parse_regulators_dt
	scripts/faddr2line: Fix overlapping text section failures
	media: st-delta: Fix PM disable depth imbalance in delta_probe
	media: exynos4-is: Change clk_disable to clk_disable_unprepare
	media: pvrusb2: fix array-index-out-of-bounds in pvr2_i2c_core_init
	media: vsp1: Fix offset calculation for plane cropping
	Bluetooth: fix dangling sco_conn and use-after-free in sco_sock_timeout
	m68k: math-emu: Fix dependencies of math emulation support
	sctp: read sk->sk_bound_dev_if once in sctp_rcv()
	ext4: reject the 'commit' option on ext2 filesystems
	drm: msm: fix possible memory leak in mdp5_crtc_cursor_set()
	ASoC: wm2000: fix missing clk_disable_unprepare() on error in wm2000_anc_transition()
	NFC: hci: fix sleep in atomic context bugs in nfc_hci_hcp_message_tx
	rxrpc: Fix listen() setting the bar too high for the prealloc rings
	rxrpc: Don't try to resend the request if we're receiving the reply
	soc: qcom: smp2p: Fix missing of_node_put() in smp2p_parse_ipc
	soc: qcom: smsm: Fix missing of_node_put() in smsm_parse_ipc
	PCI: cadence: Fix find_first_zero_bit() limit
	PCI: rockchip: Fix find_first_zero_bit() limit
	ARM: dts: bcm2835-rpi-zero-w: Fix GPIO line name for Wifi/BT
	ARM: dts: bcm2835-rpi-b: Fix GPIO line names
	crypto: marvell/cesa - ECB does not IV
	mfd: ipaq-micro: Fix error check return value of platform_get_irq()
	scsi: fcoe: Fix Wstringop-overflow warnings in fcoe_wwn_from_mac()
	firmware: arm_scmi: Fix list protocols enumeration in the base protocol
	pinctrl: mvebu: Fix irq_of_parse_and_map() return value
	drivers/base/node.c: fix compaction sysfs file leak
	dax: fix cache flush on PMD-mapped pages
	powerpc/8xx: export 'cpm_setbrg' for modules
	powerpc/idle: Fix return value of __setup() handler
	powerpc/4xx/cpm: Fix return value of __setup() handler
	proc: fix dentry/inode overinstantiating under /proc/${pid}/net
	tty: fix deadlock caused by calling printk() under tty_port->lock
	Input: sparcspkr - fix refcount leak in bbc_beep_probe
	powerpc/perf: Fix the threshold compare group constraint for power9
	powerpc/fsl_rio: Fix refcount leak in fsl_rio_setup
	mailbox: forward the hrtimer if not queued and under a lock
	RDMA/hfi1: Prevent use of lock before it is initialized
	f2fs: fix dereference of stale list iterator after loop body
	iommu/mediatek: Add list_del in mtk_iommu_remove
	i2c: at91: use dma safe buffers
	i2c: at91: Initialize dma_buf in at91_twi_xfer()
	NFSv4/pNFS: Do not fail I/O when we fail to allocate the pNFS layout
	video: fbdev: clcdfb: Fix refcount leak in clcdfb_of_vram_setup
	dmaengine: stm32-mdma: remove GISR1 register
	iommu/amd: Increase timeout waiting for GA log enablement
	perf c2c: Use stdio interface if slang is not supported
	perf jevents: Fix event syntax error caused by ExtSel
	f2fs: fix deadloop in foreground GC
	wifi: mac80211: fix use-after-free in chanctx code
	iwlwifi: mvm: fix assert 1F04 upon reconfig
	fs-writeback: writeback_sb_inodes:Recalculate 'wrote' according skipped pages
	netfilter: nf_tables: disallow non-stateful expression in sets earlier
	ext4: fix use-after-free in ext4_rename_dir_prepare
	ext4: fix bug_on in ext4_writepages
	ext4: verify dir block before splitting it
	ext4: avoid cycles in directory h-tree
	tracing: Fix potential double free in create_var_ref()
	PCI/PM: Fix bridge_d3_blacklist[] Elo i2 overwrite of Gigabyte X299
	PCI: qcom: Fix runtime PM imbalance on probe errors
	PCI: qcom: Fix unbalanced PHY init on probe errors
	dlm: fix plock invalid read
	dlm: fix missing lkb refcount handling
	ocfs2: dlmfs: fix error handling of user_dlm_destroy_lock
	scsi: dc395x: Fix a missing check on list iterator
	scsi: ufs: qcom: Add a readl() to make sure ref_clk gets enabled
	drm/amdgpu/cs: make commands with 0 chunks illegal behaviour.
	drm/nouveau/clk: Fix an incorrect NULL check on list iterator
	drm/bridge: analogix_dp: Grab runtime PM reference for DP-AUX
	md: fix an incorrect NULL check in does_sb_need_changing
	md: fix an incorrect NULL check in md_reload_sb
	media: coda: Fix reported H264 profile
	media: coda: Add more H264 levels for CODA960
	RDMA/hfi1: Fix potential integer multiplication overflow errors
	irqchip/armada-370-xp: Do not touch Performance Counter Overflow on A375, A38x, A39x
	irqchip: irq-xtensa-mx: fix initial IRQ affinity
	mac80211: upgrade passive scan to active scan on DFS channels after beacon rx
	um: chan_user: Fix winch_tramp() return value
	um: Fix out-of-bounds read in LDT setup
	iommu/msm: Fix an incorrect NULL check on list iterator
	nodemask.h: fix compilation error with GCC12
	hugetlb: fix huge_pmd_unshare address update
	rtl818x: Prevent using not initialized queues
	ASoC: rt5514: Fix event generation for "DSP Voice Wake Up" control
	carl9170: tx: fix an incorrect use of list iterator
	gma500: fix an incorrect NULL check on list iterator
	arm64: dts: qcom: ipq8074: fix the sleep clock frequency
	phy: qcom-qmp: fix struct clk leak on probe errors
	docs/conf.py: Cope with removal of language=None in Sphinx 5.0.0
	dt-bindings: gpio: altera: correct interrupt-cells
	blk-iolatency: Fix inflight count imbalances and IO hangs on offline
	phy: qcom-qmp: fix reset-controller leak on probe errors
	RDMA/rxe: Generate a completion for unsupported/invalid opcode
	MIPS: IP27: Remove incorrect `cpu_has_fpu' override
	md: bcache: check the return value of kzalloc() in detached_dev_do_request()
	pcmcia: db1xxx_ss: restrict to MIPS_DB1XXX boards
	staging: greybus: codecs: fix type confusion of list iterator variable
	tty: goldfish: Use tty_port_destroy() to destroy port
	usb: usbip: fix a refcount leak in stub_probe()
	usb: usbip: add missing device lock on tweak configuration cmd
	USB: storage: karma: fix rio_karma_init return
	usb: musb: Fix missing of_node_put() in omap2430_probe
	pwm: lp3943: Fix duty calculation in case period was clamped
	rpmsg: qcom_smd: Fix irq_of_parse_and_map() return value
	usb: dwc3: pci: Fix pm_runtime_get_sync() error checking
	iio: adc: sc27xx: fix read big scale voltage not right
	rpmsg: qcom_smd: Fix returning 0 if irq_of_parse_and_map() fails
	coresight: cpu-debug: Replace mutex with mutex_trylock on panic notifier
	soc: rockchip: Fix refcount leak in rockchip_grf_init
	clocksource/drivers/riscv: Events are stopped during CPU suspend
	rtc: mt6397: check return value after calling platform_get_resource()
	serial: meson: acquire port->lock in startup()
	serial: 8250_fintek: Check SER_RS485_RTS_* only with RS485
	serial: digicolor-usart: Don't allow CS5-6
	serial: txx9: Don't allow CS5-6
	serial: sh-sci: Don't allow CS5-6
	serial: st-asc: Sanitize CSIZE and correct PARENB for CS7
	serial: stm32-usart: Correct CSIZE, bits, and parity
	firmware: dmi-sysfs: Fix memory leak in dmi_sysfs_register_handle
	bus: ti-sysc: Fix warnings for unbind for serial
	clocksource/drivers/oxnas-rps: Fix irq_of_parse_and_map() return value
	s390/crypto: fix scatterwalk_unmap() callers in AES-GCM
	net: ethernet: mtk_eth_soc: out of bounds read in mtk_hwlro_get_fdir_entry()
	net: dsa: mv88e6xxx: Fix refcount leak in mv88e6xxx_mdios_register
	modpost: fix removing numeric suffixes
	jffs2: fix memory leak in jffs2_do_fill_super
	ubi: ubi_create_volume: Fix use-after-free when volume creation failed
	nfp: only report pause frame configuration for physical device
	net/mlx5e: Update netdev features after changing XDP state
	tcp: tcp_rtx_synack() can be called from process context
	afs: Fix infinite loop found by xfstest generic/676
	tipc: check attribute length for bearer name
	perf c2c: Fix sorting in percent_rmt_hitm_cmp()
	mips: cpc: Fix refcount leak in mips_cpc_default_phys_base
	tracing: Fix sleeping function called from invalid context on RT kernel
	tracing: Avoid adding tracer option before update_tracer_options
	i2c: cadence: Increase timeout per message if necessary
	m68knommu: set ZERO_PAGE() to the allocated zeroed page
	m68knommu: fix undefined reference to `_init_sp'
	NFSv4: Don't hold the layoutget locks across multiple RPC calls
	video: fbdev: pxa3xx-gcu: release the resources correctly in pxa3xx_gcu_probe/remove()
	xprtrdma: treat all calls not a bcall when bc_serv is NULL
	ata: pata_octeon_cf: Fix refcount leak in octeon_cf_probe
	af_unix: Fix a data-race in unix_dgram_peer_wake_me().
	bpf, arm64: Clear prog->jited_len along prog->jited
	net/mlx4_en: Fix wrong return value on ioctl EEPROM query failure
	SUNRPC: Fix the calculation of xdr->end in xdr_get_next_encode_buffer()
	net: mdio: unexport __init-annotated mdio_bus_init()
	net: xfrm: unexport __init-annotated xfrm4_protocol_init()
	net: ipv6: unexport __init-annotated seg6_hmac_init()
	net/mlx5: Rearm the FW tracer after each tracer event
	ip_gre: test csum_start instead of transport header
	net: altera: Fix refcount leak in altera_tse_mdio_create
	drm: imx: fix compiler warning with gcc-12
	iio: dummy: iio_simple_dummy: check the return value of kstrdup()
	lkdtm/usercopy: Expand size of "out of frame" object
	tty: synclink_gt: Fix null-pointer-dereference in slgt_clean()
	tty: Fix a possible resource leak in icom_probe
	drivers: staging: rtl8192u: Fix deadlock in ieee80211_beacons_stop()
	drivers: staging: rtl8192e: Fix deadlock in rtllib_beacons_stop()
	USB: host: isp116x: check return value after calling platform_get_resource()
	drivers: tty: serial: Fix deadlock in sa1100_set_termios()
	drivers: usb: host: Fix deadlock in oxu_bus_suspend()
	USB: hcd-pci: Fully suspend across freeze/thaw cycle
	usb: dwc2: gadget: don't reset gadget's driver->bus
	misc: rtsx: set NULL intfdata when probe fails
	extcon: Modify extcon device to be created after driver data is set
	clocksource/drivers/sp804: Avoid error on multiple instances
	staging: rtl8712: fix uninit-value in r871xu_drv_init()
	serial: msm_serial: disable interrupts in __msm_console_write()
	kernfs: Separate kernfs_pr_cont_buf and rename_lock.
	md: protect md_unregister_thread from reentrancy
	Revert "net: af_key: add check for pfkey_broadcast in function pfkey_process"
	ceph: allow ceph.dir.rctime xattr to be updatable
	drm/radeon: fix a possible null pointer dereference
	modpost: fix undefined behavior of is_arm_mapping_symbol()
	nbd: call genl_unregister_family() first in nbd_cleanup()
	nbd: fix race between nbd_alloc_config() and module removal
	nbd: fix io hung while disconnecting device
	nodemask: Fix return values to be unsigned
	vringh: Fix loop descriptors check in the indirect cases
	ALSA: hda/conexant - Fix loopback issue with CX20632
	cifs: return errors during session setup during reconnects
	ata: libata-transport: fix {dma|pio|xfer}_mode sysfs files
	mmc: block: Fix CQE recovery reset success
	nfc: st21nfca: fix incorrect validating logic in EVT_TRANSACTION
	nfc: st21nfca: fix memory leaks in EVT_TRANSACTION handling
	ixgbe: fix bcast packets Rx on VF after promisc removal
	ixgbe: fix unexpected VLAN Rx in promisc mode on VF
	Input: bcm5974 - set missing URB_NO_TRANSFER_DMA_MAP urb flag
	powerpc/32: Fix overread/overwrite of thread_struct via ptrace
	md/raid0: Ignore RAID0 layout if the second zone has only one device
	mtd: cfi_cmdset_0002: Move and rename chip_check/chip_ready/chip_good_for_write
	mtd: cfi_cmdset_0002: Use chip_ready() for write on S29GL064N
	tcp: fix tcp_mtup_probe_success vs wrong snd_cwnd
	Linux 4.19.247

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I58c002ddc38e389a13e2bdb9f291f05805718c9d
2022-06-14 17:16:36 +02:00


/*
* Block rq-qos base io controller
*
* This works similarly to wbt with a few exceptions
*
* - It's bio based, so the latency covers the whole block layer in addition to
* the actual io.
* - We will throttle all IO that comes in here if we need to.
* - We use the mean latency over the 100ms window. This is because writes can
* be particularly fast, which could give us a false sense of the impact of
* other workloads on our protected workload.
* - By default there's no throttling, we set the queue_depth to UINT_MAX so
* that we can have as many outstanding bios as we're allowed to. Only at
* throttle time do we pay attention to the actual queue depth.
*
* The hierarchy works like the cpu controller does: we track the latency at
* every configured node, and each configured node has its own independent
* queue depth. This means that we only care about our latency targets at the
* peer level. Some group at the bottom of the hierarchy isn't going to affect
* a group at the end of some other path if we're only configured at the leaf
* level.
*
* Consider the following:
*
*                  root blkg
*                /          \
*     fast (target=5ms)   slow (target=10ms)
*       /      \             /       \
*      a        b      normal(15ms)  unloved
*
* "a" and "b" have no target, but their combined io under "fast" cannot exceed
* an average latency of 5ms. If it does then we will throttle the "slow"
* group. In the case of "normal", if it exceeds its 15ms target, we will
* throttle "unloved", but nobody else.
*
* In this example "fast", "slow", and "normal" will be the only groups actually
* accounting their io latencies. We have to walk up the hierarchy to the root
* on every submit and complete so we can do the appropriate stat recording and
* adjust the queue depth of ourselves if needed.
*
* There are 2 ways we throttle IO.
*
* 1) Queue depth throttling. As we throttle down we will adjust the maximum
* number of IO's we're allowed to have in flight. This starts at UINT_MAX down
* to 1. If the group is only ever submitting IO for itself then this is the
* only way we throttle.
*
* 2) Induced delay throttling. This is for the case that a group is generating
* IO that has to be issued by the root cg to avoid priority inversion. So think
* REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
* of work done for us on behalf of the root cg and are being asked to scale
* down more then we induce a latency at userspace return. We accumulate the
* total amount of time we need to be punished by doing
*
* total_time += min_lat_nsec - actual_io_completion
*
* and then at throttle time will do
*
* throttle_time = min(total_time, NSEC_PER_SEC)
*
* This induced delay will throttle back the activity that is generating the
* root cg issued io's, whether that's some metadata intensive operation or the
* group is using so much memory that it is pushing us into swap.
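*
* As a worked example of the delay accounting: with min_lat_nsec = 5ms, a
* root-issued io that completes in 2ms while the group is throttled adds
* 5ms - 2ms = 3ms of accumulated punishment, and the issuing task is then
* delayed by at most one second on its return to userspace.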
*
* Copyright (C) 2018 Josef Bacik
*/
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk.h"
#define DEFAULT_SCALE_COOKIE 1000000U
static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;
struct blk_iolatency {
struct rq_qos rqos;
struct timer_list timer;
/*
* ->enabled is the master enable switch gating the throttling logic and
* inflight tracking. The number of cgroups which have iolat enabled is
* tracked in ->enable_cnt, and ->enabled is flipped on/off accordingly
* from ->enable_work with the request_queue frozen. For details, see
* blkiolatency_enable_work_fn().
*/
bool enabled;
atomic_t enable_cnt;
struct work_struct enable_work;
};
static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
return container_of(rqos, struct blk_iolatency, rqos);
}
struct child_latency_info {
spinlock_t lock;
/* Last time we adjusted the scale of everybody. */
u64 last_scale_event;
/* The latency that we missed. */
u64 scale_lat;
/* Total io's from all of our children for the last summation. */
u64 nr_samples;
/* The guy who actually changed the latency numbers. */
struct iolatency_grp *scale_grp;
/* Cookie to tell if we need to scale up or down. */
atomic_t scale_cookie;
};
struct iolatency_grp {
struct blkg_policy_data pd;
struct blk_rq_stat __percpu *stats;
struct blk_iolatency *blkiolat;
struct rq_depth rq_depth;
struct rq_wait rq_wait;
atomic64_t window_start;
atomic_t scale_cookie;
u64 min_lat_nsec;
u64 cur_win_nsec;
/* total running average of our io latency. */
u64 lat_avg;
/* Our current number of IO's for the last summation. */
u64 nr_samples;
struct child_latency_info child_lat;
};
#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
* These are the constants used to fake the fixed-point moving average
* calculation just like load average. The call to calc_load() folds
* (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
* window size is bucketed to try to approximately calculate average
* latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
* elapse immediately. Note, windows only elapse with IO activity. Idle
* periods extend the most recent window.
*/
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
(BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
2045, // exp(1/600) - 600 samples
2039, // exp(1/240) - 240 samples
2031, // exp(1/120) - 120 samples
2023, // exp(1/80) - 80 samples
2014, // exp(1/60) - 60 samples
};
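/*
* Worked example of the bucketing above: BLKIOLATENCY_EXP_BUCKET_SIZE is
* 1s / 4 = 250ms, so the default 100ms window selects exp_idx 0 and factor
* 2045 (~2048 * exp(-1/600)); 600 back-to-back 100ms windows then span the
* ~1 minute decay horizon described above.
*/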
static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}
static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}
static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
return pd_to_blkg(&iolat->pd);
}
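/*
* Take an inflight slot only if we are below max_depth, and while
* first_block is set also refuse whenever another waiter is queued ahead
* of us, so slots are handed out in rough FIFO order.
*/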
static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
wait_queue_entry_t *wait,
bool first_block)
{
struct rq_wait *rqw = &iolat->rq_wait;
if (first_block && waitqueue_active(&rqw->wait) &&
rqw->wait.head.next != &wait->entry)
return false;
return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
struct iolatency_grp *iolat,
spinlock_t *lock, bool issue_as_root,
bool use_memdelay)
__releases(lock)
__acquires(lock)
{
struct rq_wait *rqw = &iolat->rq_wait;
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
DEFINE_WAIT(wait);
bool first_block = true;
if (use_delay)
blkcg_schedule_throttle(rqos->q, use_memdelay);
/*
* To avoid priority inversions we want to just take a slot if we are
* issuing as root. If we're being killed off there's no point in
* delaying things, we may have been killed by OOM so throttling may
* make recovery take even longer, so just let the IO's through so the
* task can go away.
*/
if (issue_as_root || fatal_signal_pending(current)) {
atomic_inc(&rqw->inflight);
return;
}
if (iolatency_may_queue(iolat, &wait, first_block))
return;
do {
prepare_to_wait_exclusive(&rqw->wait, &wait,
TASK_UNINTERRUPTIBLE);
if (iolatency_may_queue(iolat, &wait, first_block))
break;
first_block = false;
if (lock) {
spin_unlock_irq(lock);
io_schedule();
spin_lock_irq(lock);
} else {
io_schedule();
}
} while (1);
finish_wait(&rqw->wait, &wait);
}
#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
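/*
* Example: with a device queue depth of 128, scale_amount() steps up by
* 128 >> 4 = 8 and down by 128 >> 2 = 32, i.e. we back off four times
* faster than we recover, and never by less than 1.
*/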
/*
* We scale the qd down faster than we scale up, so we need to use this helper
* to adjust the scale_cookie accordingly so we don't prematurely get
* scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
*
* Each group has its own local copy of the last scale cookie it saw, so if
* the global scale cookie goes up or down it knows which way it needs to go
* based on its last knowledge of it.
*/
static void scale_cookie_change(struct blk_iolatency *blkiolat,
struct child_latency_info *lat_info,
bool up)
{
unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
unsigned long scale = scale_amount(qd, up);
unsigned long old = atomic_read(&lat_info->scale_cookie);
unsigned long max_scale = qd << 1;
unsigned long diff = 0;
if (old < DEFAULT_SCALE_COOKIE)
diff = DEFAULT_SCALE_COOKIE - old;
if (up) {
if (scale + old > DEFAULT_SCALE_COOKIE)
atomic_set(&lat_info->scale_cookie,
DEFAULT_SCALE_COOKIE);
else if (diff > qd)
atomic_inc(&lat_info->scale_cookie);
else
atomic_add(scale, &lat_info->scale_cookie);
} else {
/*
* We don't want to dig a hole so deep that it takes us hours to
* dig out of it. Just enough that we don't throttle/unthrottle
* with jagged workloads but can still unthrottle once pressure
* has sufficiently dissipated.
*/
if (diff > qd) {
if (diff < max_scale)
atomic_dec(&lat_info->scale_cookie);
} else {
atomic_sub(scale, &lat_info->scale_cookie);
}
}
}
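/*
* Illustration of the cookie bounds above: starting from
* DEFAULT_SCALE_COOKIE with qd = 128, scale-down events subtract 32 at a
* time until the cookie is more than qd below the default, after which it
* only creeps down by 1 per event and stops entirely once it is
* max_scale = 2 * qd below DEFAULT_SCALE_COOKIE.
*/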
/*
* Change the queue depth of the iolatency_grp. We add 1/16th of the queue
* depth at a time when scaling up, and halve max_depth when scaling down, so
* we don't get wild swings and hopefully dial in to fairer distribution of
* the overall queue depth.
*/
static void scale_change(struct iolatency_grp *iolat, bool up)
{
unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
unsigned long scale = scale_amount(qd, up);
unsigned long old = iolat->rq_depth.max_depth;
bool changed = false;
if (old > qd)
old = qd;
if (up) {
if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
return;
if (old < qd) {
changed = true;
old += scale;
old = min(old, qd);
iolat->rq_depth.max_depth = old;
wake_up_all(&iolat->rq_wait.wait);
}
} else if (old > 1) {
old >>= 1;
changed = true;
iolat->rq_depth.max_depth = max(old, 1UL);
}
}
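/*
* E.g. with qd = 128: scaling up raises max_depth by 8 per event (capped at
* qd, waking any waiters), while scaling down simply halves it, so recovery
* is gradual but backoff is fast.
*/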
/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
struct iolatency_grp *parent;
struct child_latency_info *lat_info;
unsigned int cur_cookie;
unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
u64 scale_lat;
unsigned int old;
int direction = 0;
if (lat_to_blkg(iolat)->parent == NULL)
return;
parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
if (!parent)
return;
lat_info = &parent->child_lat;
cur_cookie = atomic_read(&lat_info->scale_cookie);
scale_lat = READ_ONCE(lat_info->scale_lat);
if (cur_cookie < our_cookie)
direction = -1;
else if (cur_cookie > our_cookie)
direction = 1;
else
return;
old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
/* Somebody beat us to the punch, just bail. */
if (old != our_cookie)
return;
if (direction < 0 && iolat->min_lat_nsec) {
u64 samples_thresh;
if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
return;
/*
* Sometimes high priority groups are their own worst enemy, so
* instead of taking it out on some poor other group that did 5%
* or less of the IO's for the last summation just skip this
* scale down event.
*/
samples_thresh = lat_info->nr_samples * 5;
samples_thresh = div64_u64(samples_thresh, 100);
if (iolat->nr_samples <= samples_thresh)
return;
}
/* We're as low as we can go. */
if (iolat->rq_depth.max_depth == 1 && direction < 0) {
blkcg_use_delay(lat_to_blkg(iolat));
return;
}
/* We're back to the default cookie, unthrottle all the things. */
if (cur_cookie == DEFAULT_SCALE_COOKIE) {
blkcg_clear_delay(lat_to_blkg(iolat));
iolat->rq_depth.max_depth = UINT_MAX;
wake_up_all(&iolat->rq_wait.wait);
return;
}
scale_change(iolat, direction > 0);
}
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
spinlock_t *lock)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
struct blkcg *blkcg;
struct blkcg_gq *blkg;
struct request_queue *q = rqos->q;
bool issue_as_root = bio_issue_as_root_blkg(bio);
if (!blkiolat->enabled)
return;
rcu_read_lock();
blkcg = bio_blkcg(bio);
bio_associate_blkcg(bio, &blkcg->css);
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg)) {
if (!lock)
spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_create(blkcg, q);
if (IS_ERR(blkg))
blkg = NULL;
if (!lock)
spin_unlock_irq(q->queue_lock);
}
if (!blkg)
goto out;
bio_issue_init(&bio->bi_issue, bio_sectors(bio));
bio_associate_blkg(bio, blkg);
out:
rcu_read_unlock();
while (blkg && blkg->parent) {
struct iolatency_grp *iolat = blkg_to_lat(blkg);
if (!iolat) {
blkg = blkg->parent;
continue;
}
check_scale_change(iolat);
__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
blkg = blkg->parent;
}
if (!timer_pending(&blkiolat->timer))
mod_timer(&blkiolat->timer, jiffies + HZ);
}
static void iolatency_record_time(struct iolatency_grp *iolat,
struct bio_issue *issue, u64 now,
bool issue_as_root)
{
struct blk_rq_stat *rq_stat;
u64 start = bio_issue_time(issue);
u64 req_time;
/*
* Truncate @now to the same granularity that the issue time was recorded
* with, so the subtraction below compares like with like.
*/
now = __bio_issue_time(now);
if (now <= start)
return;
req_time = now - start;
/*
* We don't want to count issue_as_root bio's in the cgroups latency
* statistics as it could skew the numbers downwards.
*/
if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
u64 sub = iolat->min_lat_nsec;
if (req_time < sub)
blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
return;
}
rq_stat = get_cpu_ptr(iolat->stats);
blk_rq_stat_add(rq_stat, req_time);
put_cpu_ptr(rq_stat);
}
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct iolatency_grp *parent;
struct child_latency_info *lat_info;
struct blk_rq_stat stat;
unsigned long flags;
int cpu, exp_idx;
blk_rq_stat_init(&stat);
preempt_disable();
for_each_online_cpu(cpu) {
struct blk_rq_stat *s;
s = per_cpu_ptr(iolat->stats, cpu);
blk_rq_stat_sum(&stat, s);
blk_rq_stat_init(s);
}
preempt_enable();
parent = blkg_to_lat(blkg->parent);
if (!parent)
return;
lat_info = &parent->child_lat;
/*
* calc_load() takes in a number stored in fixed point representation.
* Because we are using this for IO time in ns, the values stored
* are significantly larger than the FIXED_1 denominator (2048).
* Therefore, rounding errors in the calculation are negligible and
* can be ignored.
*/
exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
div64_u64(iolat->cur_win_nsec,
BLKIOLATENCY_EXP_BUCKET_SIZE));
iolat->lat_avg = calc_load(iolat->lat_avg,
iolatency_exp_factors[exp_idx],
stat.mean);
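/*
* E.g. with exp_idx 0 (factor 2045), a previous lat_avg of 4ms and a
* window mean of 10ms move the average to roughly
* (4ms * 2045 + 10ms * 3) / 2048 ~= 4.01ms.
*/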
/* Everything is ok and we don't need to adjust the scale. */
if (stat.mean <= iolat->min_lat_nsec &&
atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
return;
/* Update the parent's aggregate stats and scaling state under its lock. */
spin_lock_irqsave(&lat_info->lock, flags);
lat_info->nr_samples -= iolat->nr_samples;
lat_info->nr_samples += stat.nr_samples;
iolat->nr_samples = stat.nr_samples;
if ((lat_info->last_scale_event >= now ||
now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
lat_info->scale_lat <= iolat->min_lat_nsec)
goto out;
if (stat.mean <= iolat->min_lat_nsec &&
stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
if (lat_info->scale_grp == iolat) {
lat_info->last_scale_event = now;
scale_cookie_change(iolat->blkiolat, lat_info, true);
}
} else if (stat.mean > iolat->min_lat_nsec) {
lat_info->last_scale_event = now;
if (!lat_info->scale_grp ||
lat_info->scale_lat > iolat->min_lat_nsec) {
WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
lat_info->scale_grp = iolat;
}
scale_cookie_change(iolat->blkiolat, lat_info, false);
}
out:
spin_unlock_irqrestore(&lat_info->lock, flags);
}
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
struct blkcg_gq *blkg;
struct rq_wait *rqw;
struct iolatency_grp *iolat;
u64 window_start;
u64 now = ktime_to_ns(ktime_get());
bool issue_as_root = bio_issue_as_root_blkg(bio);
int inflight = 0;
blkg = bio->bi_blkg;
if (!blkg)
return;
iolat = blkg_to_lat(bio->bi_blkg);
if (!iolat)
return;
if (!iolat->blkiolat->enabled)
return;
while (blkg && blkg->parent) {
iolat = blkg_to_lat(blkg);
if (!iolat) {
blkg = blkg->parent;
continue;
}
rqw = &iolat->rq_wait;
inflight = atomic_dec_return(&rqw->inflight);
WARN_ON_ONCE(inflight < 0);
/*
* If bi_status is BLK_STS_AGAIN, the bio wasn't actually
* submitted, so do not account for it.
*/
if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
iolatency_record_time(iolat, &bio->bi_issue, now,
issue_as_root);
window_start = atomic64_read(&iolat->window_start);
if (now > window_start &&
(now - window_start) >= iolat->cur_win_nsec) {
if (atomic64_cmpxchg(&iolat->window_start,
window_start, now) == window_start)
iolatency_check_latencies(iolat, now);
}
}
wake_up(&rqw->wait);
blkg = blkg->parent;
}
}
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
del_timer_sync(&blkiolat->timer);
flush_work(&blkiolat->enable_work);
blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
kfree(blkiolat);
}
static struct rq_qos_ops blkcg_iolatency_ops = {
.throttle = blkcg_iolatency_throttle,
.done_bio = blkcg_iolatency_done_bio,
.exit = blkcg_iolatency_exit,
};
static void blkiolatency_timer_fn(struct timer_list *t)
{
struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
struct blkcg_gq *blkg;
struct cgroup_subsys_state *pos_css;
u64 now = ktime_to_ns(ktime_get());
rcu_read_lock();
blkg_for_each_descendant_pre(blkg, pos_css,
blkiolat->rqos.q->root_blkg) {
struct iolatency_grp *iolat;
struct child_latency_info *lat_info;
unsigned long flags;
u64 cookie;
/*
* We could be exiting, don't access the pd unless we have a
* ref on the blkg.
*/
if (!blkg_try_get(blkg))
continue;
iolat = blkg_to_lat(blkg);
if (!iolat)
goto next;
lat_info = &iolat->child_lat;
cookie = atomic_read(&lat_info->scale_cookie);
if (cookie >= DEFAULT_SCALE_COOKIE)
goto next;
spin_lock_irqsave(&lat_info->lock, flags);
if (lat_info->last_scale_event >= now)
goto next_lock;
/*
* We scaled down but don't have a scale_grp, scale up and carry
* on.
*/
if (lat_info->scale_grp == NULL) {
scale_cookie_change(iolat->blkiolat, lat_info, true);
goto next_lock;
}
/*
* It's been 5 seconds since our last scale event, clear the
* scale grp in case the group that needed the scale down isn't
* doing any IO currently.
*/
if (now - lat_info->last_scale_event >=
((u64)NSEC_PER_SEC * 5))
lat_info->scale_grp = NULL;
next_lock:
spin_unlock_irqrestore(&lat_info->lock, flags);
next:
blkg_put(blkg);
}
rcu_read_unlock();
}
/**
* blkiolatency_enable_work_fn - Enable or disable iolatency on the device
* @work: enable_work of the blk_iolatency of interest
*
* iolatency needs to keep track of the number of in-flight IOs per cgroup. This
* is relatively expensive as it involves walking up the hierarchy twice for
* every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
* want to disable the in-flight tracking.
*
* We have to make sure that the counting is balanced - we don't want to leak
* the in-flight counts by disabling accounting in the completion path while IOs
* are in flight. This is achieved by ensuring that no IO is in flight by
* freezing the queue while flipping ->enabled. As this requires a sleepable
* context, ->enabled flipping is punted to this work function.
*/
static void blkiolatency_enable_work_fn(struct work_struct *work)
{
struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
enable_work);
bool enabled;
/*
* There can only be one instance of this function running for @blkiolat
* and it's guaranteed to be executed at least once after the latest
* ->enable_cnt modification. Acting on the latest ->enable_cnt is
* sufficient.
*
* Also, we know @blkiolat is safe to access as ->enable_work is flushed
* in blkcg_iolatency_exit().
*/
enabled = atomic_read(&blkiolat->enable_cnt);
if (enabled != blkiolat->enabled) {
blk_mq_freeze_queue(blkiolat->rqos.q);
blkiolat->enabled = enabled;
blk_mq_unfreeze_queue(blkiolat->rqos.q);
}
}
int blk_iolatency_init(struct request_queue *q)
{
struct blk_iolatency *blkiolat;
struct rq_qos *rqos;
int ret;
blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
if (!blkiolat)
return -ENOMEM;
rqos = &blkiolat->rqos;
rqos->id = RQ_QOS_CGROUP;
rqos->ops = &blkcg_iolatency_ops;
rqos->q = q;
rq_qos_add(q, rqos);
ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
if (ret) {
rq_qos_del(q, rqos);
kfree(blkiolat);
return ret;
}
timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
return 0;
}
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
BLKIOLATENCY_MAX_WIN_SIZE);
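/* E.g. a 10ms target yields a 16 * 10ms = 160ms sampling window. */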
if (!oldval && val) {
if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
schedule_work(&blkiolat->enable_work);
}
if (oldval && !val) {
blkcg_clear_delay(blkg);
if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
schedule_work(&blkiolat->enable_work);
}
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
if (blkg->parent) {
struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
struct child_latency_info *lat_info;
if (!iolat)
return;
lat_info = &iolat->child_lat;
spin_lock(&lat_info->lock);
atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
lat_info->last_scale_event = 0;
lat_info->scale_grp = NULL;
lat_info->scale_lat = 0;
spin_unlock(&lat_info->lock);
}
}
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct blkcg *blkcg = css_to_blkcg(of_css(of));
struct blkcg_gq *blkg;
struct blk_iolatency *blkiolat;
struct blkg_conf_ctx ctx;
struct iolatency_grp *iolat;
char *p, *tok;
u64 lat_val = 0;
u64 oldval;
int ret;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
return ret;
iolat = blkg_to_lat(ctx.blkg);
blkiolat = iolat->blkiolat;
p = ctx.body;
ret = -EINVAL;
while ((tok = strsep(&p, " "))) {
char key[16];
char val[21]; /* 18446744073709551616 */
if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
goto out;
if (!strcmp(key, "target")) {
u64 v;
if (!strcmp(val, "max"))
lat_val = 0;
else if (sscanf(val, "%llu", &v) == 1)
lat_val = v * NSEC_PER_USEC;
else
goto out;
} else {
goto out;
}
}
/* Update the target and reset the parent's scaling state if it changed. */
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
iolatency_set_min_lat_nsec(blkg, lat_val);
if (oldval != iolat->min_lat_nsec)
iolatency_clear_scaling(blkg);
ret = 0;
out:
blkg_conf_finish(&ctx);
return ret ?: nbytes;
}
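/*
* The accepted format mirrors the cgroup2 io.latency interface, e.g.:
*
*   echo "8:16 target=10000" > io.latency
*
* sets a 10ms (10000us) target on device 8:16, and "target=max" clears it
* again.
*/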
static u64 iolatency_prfill_limit(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
const char *dname = blkg_dev_name(pd->blkg);
if (!dname || !iolat->min_lat_nsec)
return 0;
seq_printf(sf, "%s target=%llu\n",
dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
return 0;
}
static int iolatency_print_limit(struct seq_file *sf, void *v)
{
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
iolatency_prfill_limit,
&blkcg_policy_iolatency, seq_cft(sf)->private, false);
return 0;
}
static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
size_t size)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
avg_lat, cur_win);
return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
iolat->rq_depth.max_depth, avg_lat, cur_win);
}
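/*
* The snippet above is appended to this group's line in io.stat, e.g.
* " depth=max avg_lat=2100 win=100", with avg_lat in usec and win in msec.
*/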
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
struct iolatency_grp *iolat;
iolat = kzalloc_node(sizeof(*iolat), gfp, node);
if (!iolat)
return NULL;
iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
__alignof__(struct blk_rq_stat), gfp);
if (!iolat->stats) {
kfree(iolat);
return NULL;
}
return &iolat->pd;
}
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
u64 now = ktime_to_ns(ktime_get());
int cpu;
for_each_possible_cpu(cpu) {
struct blk_rq_stat *stat;
stat = per_cpu_ptr(iolat->stats, cpu);
blk_rq_stat_init(stat);
}
rq_wait_init(&iolat->rq_wait);
spin_lock_init(&iolat->child_lat.lock);
iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
iolat->rq_depth.max_depth = UINT_MAX;
iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
iolat->blkiolat = blkiolat;
iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
atomic64_set(&iolat->window_start, now);
/*
* We init things in list order, so the pd for the parent may not be
* init'ed yet for whatever reason.
*/
if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
atomic_set(&iolat->scale_cookie,
atomic_read(&parent->child_lat.scale_cookie));
} else {
atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
}
atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}
static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
iolatency_set_min_lat_nsec(blkg, 0);
iolatency_clear_scaling(blkg);
}
static void iolatency_pd_free(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
free_percpu(iolat->stats);
kfree(iolat);
}
static struct cftype iolatency_files[] = {
{
.name = "latency",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = iolatency_print_limit,
.write = iolatency_set_limit,
},
{}
};
static struct blkcg_policy blkcg_policy_iolatency = {
.dfl_cftypes = iolatency_files,
.pd_alloc_fn = iolatency_pd_alloc,
.pd_init_fn = iolatency_pd_init,
.pd_offline_fn = iolatency_pd_offline,
.pd_free_fn = iolatency_pd_free,
.pd_stat_fn = iolatency_pd_stat,
};
static int __init iolatency_init(void)
{
return blkcg_policy_register(&blkcg_policy_iolatency);
}
static void __exit iolatency_exit(void)
{
return blkcg_policy_unregister(&blkcg_policy_iolatency);
}
module_init(iolatency_init);
module_exit(iolatency_exit);