This is the 4.19.150 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl99WYoACgkQONu9yGCS
 aT75hA/+Lu0udzlCD/Uw1SDFKPo3Xed6hFiFUOHHtQxVel9r3DNOX1+kfNAH5ifo
 G2NW6O25Bl4qxmG02QRW85r7JRHhoMQOx1DldLGlJCfcgmVcwEaiRg6HgMh+9OiC
 qPAuE6zbB5h97dPQppe5u7e6pzjrsTgR6pYlnuwPVF6TSmTYXM3OGubXItOyneRZ
 ePnzH9w4bk/n4UAARYOowfFhRnO/Qml+QPxc8rFbK2inGXCJ31QLITJCa3Y3KXP3
 AC2aM2M8B04GJBFhXH8pLFrzvB/+S1DwzmtT3d6TWdQRqdSr+GAPJ/3jX2eKMuwK
 6vfF/caGvfYpomEFCHFKyLxmFwhbSEfVD0ht4jr3aiF/E0ii8sXKN8mnnowpNFpG
 23kG+baxxe1ZbjXw4VjtGGXruJQ6im5o7siRnmfKYv18Fo5O3yEga9pCjOdkHT20
 gjes0GfTtgr3nrlW19B03ZYL7p3ri46NlY7Zlawvtz3dlWY9rTkII3nxD5k5Ywxs
 KgDVpREwr7LuKOgGTxkwLGHNEF7b1mJrjdrlX/X2SDJD/IQc3xdD5382lXSQRaem
 QaZhu6S6SNCjI9fGQ7jOYMR3ouGegFsPOnr6BQxvKmoolsUzXTeTxR0u2R5dqYGI
 1RwmuTIQTvURoFy1XyhNyLJAbCvk+9BeNp8I5/YTNbvkFKlnawM=
 =qKZh
 -----END PGP SIGNATURE-----

Merge 4.19.150 into android-4.19-stable

Changes in 4.19.150
	mmc: sdhci: Workaround broken command queuing on Intel GLK based IRBIS models
	USB: gadget: f_ncm: Fix NDP16 datagram validation
	gpio: mockup: fix resource leak in error path
	gpio: tc35894: fix up tc35894 interrupt configuration
	clk: socfpga: stratix10: fix the divider for the emac_ptp_free_clk
	vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
	vsock/virtio: stop workers during the .remove()
	vsock/virtio: add transport parameter to the virtio_transport_reset_no_sock()
	net: virtio_vsock: Enhance connection semantics
	Input: i8042 - add nopnp quirk for Acer Aspire 5 A515
	ftrace: Move RCU is watching check after recursion check
	drm/amdgpu: restore proper ref count in amdgpu_display_crtc_set_config
	drivers/net/wan/hdlc_fr: Add needed_headroom for PVC devices
	drm/sun4i: mixer: Extend regmap max_register
	net: dec: de2104x: Increase receive ring size for Tulip
	rndis_host: increase sleep time in the query-response loop
	nvme-core: get/put ctrl and transport module in nvme_dev_open/release()
	drivers/net/wan/lapbether: Make skb->protocol consistent with the header
	drivers/net/wan/hdlc: Set skb->protocol before transmitting
	mac80211: do not allow bigger VHT MPDUs than the hardware supports
	spi: fsl-espi: Only process interrupts for expected events
	nvme-fc: fail new connections to a deleted host or remote port
	gpio: sprd: Clear interrupt when setting the type as edge
	pinctrl: mvebu: Fix i2c sda definition for 98DX3236
	nfs: Fix security label length not being reset
	clk: samsung: exynos4: mark 'chipid' clock as CLK_IGNORE_UNUSED
	iommu/exynos: add missing put_device() call in exynos_iommu_of_xlate()
	i2c: cpm: Fix i2c_ram structure
	Input: trackpoint - enable Synaptics trackpoints
	random32: Restore __latent_entropy attribute on net_rand_state
	mm: replace memmap_context by meminit_context
	mm: don't rely on system state to detect hot-plug operations
	net/packet: fix overflow in tpacket_rcv
	epoll: do not insert into poll queues until all sanity checks are done
	epoll: replace ->visited/visited_list with generation count
	epoll: EPOLL_CTL_ADD: close the race in decision to take fast path
	ep_create_wakeup_source(): dentry name can change under you...
	netfilter: ctnetlink: add a range check for l3/l4 protonum
	Linux 4.19.150

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib6f1b6fce01bec80efd4a905d03903ff20ca89be
commit 2dce03a5c2
Greg Kroah-Hartman, 2020-10-07 08:45:35 +02:00
42 changed files with 425 additions and 299 deletions

Makefile

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 149
SUBLEVEL = 150
EXTRAVERSION =
NAME = "People's Front"

arch/ia64/mm/init.c

@@ -499,7 +499,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start),
MEMMAP_EARLY, NULL);
MEMINIT_EARLY, NULL);
return 0;
}
@@ -508,8 +508,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
NULL);
memmap_init_zone(size, nid, zone, start_pfn,
MEMINIT_EARLY, NULL);
} else {
struct page *start;
struct memmap_init_callback_data args;

drivers/base/node.c

@@ -414,10 +414,32 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
return pfn_to_nid(pfn);
}
/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
static int do_register_memory_block_under_node(int nid,
struct memory_block *mem_blk)
{
int ret, nid = *(int *)arg;
int ret;
/*
* If this memory block spans multiple nodes, we only indicate
* the last processed node.
*/
mem_blk->nid = nid;
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
if (ret)
return ret;
return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
&node_devices[nid]->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
}
/* register memory section under specified node if it spans that node */
int register_mem_block_under_node_early(struct memory_block *mem_blk, void *arg)
{
int nid = *(int *)arg;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
@@ -437,38 +459,33 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
}
/*
* We need to check if page belongs to nid only for the boot
* case, during hotplug we know that all pages in the memory
* block belong to the same node.
* We need to check if page belongs to nid only at the boot
* case because node's ranges can be interleaved.
*/
if (system_state == SYSTEM_BOOTING) {
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
if (page_nid != nid)
continue;
}
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
if (page_nid != nid)
continue;
/*
* If this memory block spans multiple nodes, we only indicate
* the last processed node.
*/
mem_blk->nid = nid;
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
if (ret)
return ret;
return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
&node_devices[nid]->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
return do_register_memory_block_under_node(nid, mem_blk);
}
/* mem section does not span the specified node */
return 0;
}
/*
* During hotplug we know that all pages in the memory block belong to the same
* node.
*/
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
void *arg)
{
int nid = *(int *)arg;
return do_register_memory_block_under_node(nid, mem_blk);
}
/*
* Unregister a memory block device under the node it spans. Memory blocks
* with multiple nodes cannot be offlined and therefore also never be removed.
@@ -484,10 +501,17 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
enum meminit_context context)
{
return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
register_mem_sect_under_node);
walk_memory_blocks_func_t func;
if (context == MEMINIT_HOTPLUG)
func = register_mem_block_under_node_hotplug;
else
func = register_mem_block_under_node_early;
return walk_memory_range(start_pfn, end_pfn, (void *)&nid, func);
}
#ifdef CONFIG_HUGETLBFS
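
The rework above drops the system_state check inside the walker: the caller now names the context explicitly and link_mem_sections() picks the right callback once, up front. A minimal userspace sketch of that dispatch pattern; all names are illustrative, not kernel API:

#include <stdio.h>

enum init_context { INIT_EARLY, INIT_HOTPLUG };

typedef int (*walk_func_t)(int block, void *arg);

static int register_block_early(int block, void *arg)
{
    /* boot case: node ranges can be interleaved, each block is validated */
    printf("early: validate and register block %d under node %d\n",
           block, *(int *)arg);
    return 0;
}

static int register_block_hotplug(int block, void *arg)
{
    /* hot-plug case: all pages are already known to belong to this node */
    printf("hotplug: register block %d under node %d\n",
           block, *(int *)arg);
    return 0;
}

static int walk_blocks(int start, int end, void *arg, walk_func_t func)
{
    for (int b = start; b < end; b++) {
        int ret = func(b, arg);
        if (ret)
            return ret;
    }
    return 0;
}

/* analog of the reworked link_mem_sections(): choose the callback once */
static int link_blocks(int nid, int start, int end, enum init_context ctx)
{
    walk_func_t func = (ctx == INIT_HOTPLUG) ? register_block_hotplug
                                             : register_block_early;
    return walk_blocks(start, end, &nid, func);
}

int main(void)
{
    link_blocks(0, 0, 2, INIT_EARLY);
    link_blocks(0, 2, 4, INIT_HOTPLUG);
    return 0;
}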

drivers/clk/samsung/clk-exynos4.c

@@ -1072,7 +1072,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = {
GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
CLK_IGNORE_UNUSED, 0),
GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
@@ -1113,7 +1113,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
0),
GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
CLK_IGNORE_UNUSED, 0),
GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,

drivers/clk/socfpga/clk-s10.c

@@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
{ STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
0, 0, 2, 0xB0, 1},
{ STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2},
{ STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
{ STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,

drivers/gpio/gpio-mockup.c

@@ -367,6 +367,7 @@ static int __init gpio_mockup_init(void)
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
gpio_mockup_err("error registering platform driver\n");
debugfs_remove_recursive(gpio_mockup_dbg_dir);
return err;
}
@@ -386,6 +387,7 @@ static int __init gpio_mockup_init(void)
gpio_mockup_err("error registering device");
platform_driver_unregister(&gpio_mockup_driver);
gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
return PTR_ERR(pdev);
}

drivers/gpio/gpio-sprd.c

@@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data,
sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1);
sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1);
sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:

drivers/gpio/gpio-tc3589x.c

@@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
continue;
tc3589x_gpio->oldregs[i][j] = new;
tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new);
tc3589x_reg_write(tc3589x, regmap[i] + j, new);
}
}

drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -290,7 +290,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
take the current one */
if (active && !adev->have_disp_power_ref) {
adev->have_disp_power_ref = true;
goto out;
return ret;
}
/* if we have no active crtcs, then drop the power ref
we got before */

drivers/gpu/drm/sun4i/sun8i_mixer.c

@@ -364,7 +364,7 @@ static struct regmap_config sun8i_mixer_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0xbfffc, /* guessed */
.max_register = 0xffffc, /* guessed */
};
static int sun8i_mixer_of_get_id(struct device_node *node)

drivers/i2c/busses/i2c-cpm.c

@@ -74,6 +74,9 @@ struct i2c_ram {
char res1[4]; /* Reserved */
ushort rpbase; /* Relocation pointer */
char res2[2]; /* Reserved */
/* The following elements are only for CPM2 */
char res3[4]; /* Reserved */
uint sdmatmp; /* Internal */
};
#define I2COM_START 0x80

drivers/input/mouse/trackpoint.c

@@ -285,6 +285,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse,
case TP_VARIANT_ALPS:
case TP_VARIANT_ELAN:
case TP_VARIANT_NXP:
case TP_VARIANT_JYT_SYNAPTICS:
case TP_VARIANT_SYNAPTICS:
if (variant_id)
*variant_id = param[0];
if (firmware_id)

drivers/input/serio/i8042-x86ia64io.h

@@ -725,6 +725,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
},
},
{
/* Acer Aspire 5 A515 */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
},
},
{ }
};

drivers/iommu/exynos-iommu.c

@@ -1302,13 +1302,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
return -ENODEV;
data = platform_get_drvdata(sysmmu);
if (!data)
if (!data) {
put_device(&sysmmu->dev);
return -ENODEV;
}
if (!owner) {
owner = kzalloc(sizeof(*owner), GFP_KERNEL);
if (!owner)
if (!owner) {
put_device(&sysmmu->dev);
return -ENOMEM;
}
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);

drivers/mmc/host/sdhci-pci-core.c

@@ -739,7 +739,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
dmi_match(DMI_BIOS_VENDOR, "LENOVO");
(dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
dmi_match(DMI_SYS_VENDOR, "IRBIS"));
}
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)

drivers/net/ethernet/dec/tulip/de2104x.c

@@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
#define DSL CONFIG_DE2104X_DSL
#endif
#define DE_RX_RING_SIZE 64
#define DE_RX_RING_SIZE 128
#define DE_TX_RING_SIZE 64
#define DE_RING_BYTES \
((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \

drivers/net/usb/rndis_host.c

@@ -213,7 +213,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
dev_dbg(&info->control->dev,
"rndis response error, code %d\n", retval);
}
msleep(20);
msleep(40);
}
dev_dbg(&info->control->dev, "rndis response timeout\n");
return -ETIMEDOUT;

drivers/net/wan/hdlc_cisco.c

@@ -121,6 +121,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb_put(skb, sizeof(struct cisco_packet));
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);

drivers/net/wan/hdlc_fr.c

@@ -436,6 +436,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
if (pvc->state.fecn) /* TX Congestion counter */
dev->stats.tx_compressed++;
skb->dev = pvc->frad;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
return NETDEV_TX_OK;
}
@@ -558,6 +560,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
skb_put(skb, i);
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
@@ -1044,7 +1047,7 @@ static void pvc_setup(struct net_device *dev)
{
dev->type = ARPHRD_DLCI;
dev->flags = IFF_POINTOPOINT;
dev->hard_header_len = 10;
dev->hard_header_len = 0;
dev->addr_len = 2;
netif_keep_dst(dev);
}
@@ -1096,6 +1099,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->mtu = HDLC_MAX_MTU;
dev->min_mtu = 68;
dev->max_mtu = HDLC_MAX_MTU;
dev->needed_headroom = 10;
dev->priv_flags |= IFF_NO_QUEUE;
dev->ml_priv = pvc;
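
The last two hunks swap the PVC devices' hard_header_len for needed_headroom: the device no longer claims a hard header of its own and instead asks upper layers to leave 10 spare bytes in front of the payload, which pvc_xmit() then fills by pushing the header itself. A toy sketch of that reserve-then-push idea, using an illustrative buffer type rather than the sk_buff API:

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct buf {
    unsigned char storage[128];
    unsigned char *data;    /* current start of the payload */
    size_t len;
};

static void buf_init(struct buf *b, size_t headroom,
                     const void *payload, size_t len)
{
    assert(headroom + len <= sizeof(b->storage));
    b->data = b->storage + headroom;        /* like skb_reserve() */
    memcpy(b->data, payload, len);
    b->len = len;
}

static unsigned char *buf_push(struct buf *b, size_t hlen)
{
    assert((size_t)(b->data - b->storage) >= hlen); /* headroom left? */
    b->data -= hlen;                        /* like skb_push() */
    b->len += hlen;
    return b->data;
}

int main(void)
{
    struct buf b;

    buf_init(&b, 10, "payload", 7); /* the 10 bytes the PVC now requests */
    memcpy(buf_push(&b, 4), "\x03\xcc\x00\x00", 4); /* prepend a header */
    printf("length with header: %zu\n", b.len);
    return 0;
}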

drivers/net/wan/hdlc_ppp.c

@@ -254,6 +254,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
skb_queue_tail(&tx_queue, skb);
}

drivers/net/wan/lapbether.c

@@ -201,8 +201,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
struct net_device *dev;
int size = skb->len;
skb->protocol = htons(ETH_P_X25);
ptr = skb_push(skb, 2);
*ptr++ = size % 256;
@@ -213,6 +211,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
skb->dev = dev = lapbeth->ethdev;
skb->protocol = htons(ETH_P_DEC);
skb_reset_network_header(skb);
dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);

drivers/nvme/host/core.c

@@ -2605,10 +2605,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
return -EWOULDBLOCK;
}
nvme_get_ctrl(ctrl);
if (!try_module_get(ctrl->ops->module))
return -EINVAL;
file->private_data = ctrl;
return 0;
}
static int nvme_dev_release(struct inode *inode, struct file *file)
{
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
module_put(ctrl->ops->module);
nvme_put_ctrl(ctrl);
return 0;
}
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
struct nvme_ns *ns;
@@ -2669,6 +2683,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
.release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = nvme_dev_ioctl,
};
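
The new open/release pair pins both the controller object and its transport module for as long as the character device is open, so the module cannot be unloaded under an active handle. A userspace sketch of such paired acquisition, with plain counters standing in for kref and module refcounts; note the sketch also rolls back the first reference when the second acquisition fails, which paired acquisition generally requires:

#include <stdbool.h>
#include <stdio.h>

static int ctrl_refs, mod_refs;
static bool mod_live = true;

static void ctrl_get(void) { ctrl_refs++; }
static void ctrl_put(void) { ctrl_refs--; }

static bool mod_try_get(void)
{
    if (!mod_live)
        return false;       /* transport module is unloading */
    mod_refs++;
    return true;
}

static void mod_put(void) { mod_refs--; }

static int dev_open(void)
{
    ctrl_get();
    if (!mod_try_get()) {
        ctrl_put();         /* undo the first reference on failure */
        return -1;
    }
    return 0;
}

static void dev_release(void)
{
    mod_put();              /* drop in reverse order of acquisition */
    ctrl_put();
}

int main(void)
{
    if (dev_open() == 0)
        dev_release();
    printf("refs after open/release: ctrl=%d module=%d\n",
           ctrl_refs, mod_refs);
    return 0;
}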

drivers/nvme/host/fc.c

@@ -3294,12 +3294,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
spin_lock_irqsave(&nvme_fc_lock, flags);
list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
if (lport->localport.node_name != laddr.nn ||
lport->localport.port_name != laddr.pn)
lport->localport.port_name != laddr.pn ||
lport->localport.port_state != FC_OBJSTATE_ONLINE)
continue;
list_for_each_entry(rport, &lport->endp_list, endp_list) {
if (rport->remoteport.node_name != raddr.nn ||
rport->remoteport.port_name != raddr.pn)
rport->remoteport.port_name != raddr.pn ||
rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
continue;
/* if fail to get reference fall through. Will error */

drivers/pinctrl/mvebu/pinctrl-armada-xp.c

@@ -418,7 +418,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)),
MPP_MODE(15,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)),
MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)),
MPP_MODE(16,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)),

drivers/spi/spi-fsl-espi.c

@@ -559,13 +559,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
struct fsl_espi *espi = context_data;
u32 events;
u32 events, mask;
spin_lock(&espi->lock);
/* Get interrupt events(tx/rx) */
events = fsl_espi_read_reg(espi, ESPI_SPIE);
if (!events) {
mask = fsl_espi_read_reg(espi, ESPI_SPIM);
if (!(events & mask)) {
spin_unlock(&espi->lock);
return IRQ_NONE;
}
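
With the mask applied, the handler claims the interrupt only when an event that is actually enabled in ESPI_SPIM has fired; spurious status bits no longer steal the line from other devices sharing it. A compact sketch of the events-and-mask test (constants are illustrative):

#include <stdint.h>
#include <stdio.h>

#define EV_TX_DONE  0x1u
#define EV_RX_READY 0x2u

/* returns 1 for IRQ_HANDLED, 0 for IRQ_NONE */
static int irq_handler(uint32_t events, uint32_t mask)
{
    if (!(events & mask))
        return 0;   /* not an enabled event: not ours, don't claim it */
    /* ... service only the enabled events ... */
    return 1;
}

int main(void)
{
    /* RX fired but only TX is enabled: must not be claimed */
    printf("claimed: %d\n", irq_handler(EV_RX_READY, EV_TX_DONE));
    /* TX fired and TX is enabled: handled */
    printf("claimed: %d\n", irq_handler(EV_TX_DONE, EV_TX_DONE));
    return 0;
}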

drivers/usb/gadget/function/f_ncm.c

@@ -1192,7 +1192,6 @@ static int ncm_unwrap_ntb(struct gether *port,
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
bool ndp_after_header;
/* dwSignature */
if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1219,7 +1218,6 @@ static int ncm_unwrap_ntb(struct gether *port,
}
ndp_index = get_ncm(&tmp, opts->ndp_index);
ndp_after_header = false;
/* Run through all the NDP's in the NTB */
do {
@@ -1235,8 +1233,6 @@ static int ncm_unwrap_ntb(struct gether *port,
ndp_index);
goto err;
}
if (ndp_index == opts->nth_size)
ndp_after_header = true;
/*
* walk through NDP
@@ -1315,37 +1311,13 @@ static int ncm_unwrap_ntb(struct gether *port,
index2 = get_ncm(&tmp, opts->dgram_item_len);
dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
if (index2 == 0 || dg_len2 == 0)
break;
/* wDatagramIndex[1] */
if (ndp_after_header) {
if (index2 < opts->nth_size + opts->ndp_size) {
INFO(port->func.config->cdev,
"Bad index: %#X\n", index2);
goto err;
}
} else {
if (index2 < opts->nth_size + opts->dpe_size) {
INFO(port->func.config->cdev,
"Bad index: %#X\n", index2);
goto err;
}
}
if (index2 > block_len - opts->dpe_size) {
INFO(port->func.config->cdev,
"Bad index: %#X\n", index2);
goto err;
}
/* wDatagramLength[1] */
if ((dg_len2 < 14 + crc_len) ||
(dg_len2 > frame_max)) {
INFO(port->func.config->cdev,
"Bad dgram length: %#X\n", dg_len);
goto err;
}
/*
* Copy the data into a new skb.
* This ensures the truesize is correct
@@ -1362,6 +1334,8 @@ static int ncm_unwrap_ntb(struct gether *port,
ndp_len -= 2 * (opts->dgram_item_len * 2);
dgram_counter++;
if (index2 == 0 || dg_len2 == 0)
break;
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
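
ncm_unwrap_ntb() treats every index and length word parsed out of the NTB as untrusted input: each must be range-checked against the received block before anything is dereferenced, and a zero index or length terminates the table. A hedged sketch of that style of validation; the constants and field names are illustrative, not the NCM spec structures:

#include <stdint.h>
#include <stdio.h>

#define MIN_ETH_FRAME 14u

/* 1: usable datagram, 0: end-of-table marker, -1: reject the NTB */
static int validate_dgram(uint32_t index, uint32_t dg_len,
                          uint32_t block_len, uint32_t frame_max,
                          uint32_t crc_len)
{
    if (index == 0 || dg_len == 0)
        return 0;                       /* terminator entry */
    if (index > block_len || dg_len > block_len - index)
        return -1;                      /* points outside the block */
    if (dg_len < MIN_ETH_FRAME + crc_len || dg_len > frame_max)
        return -1;                      /* implausible frame size */
    return 1;
}

int main(void)
{
    printf("%d\n", validate_dgram(64, 128, 512, 1514, 0));  /* 1  */
    printf("%d\n", validate_dgram(500, 128, 512, 1514, 0)); /* -1 */
    printf("%d\n", validate_dgram(0, 0, 512, 1514, 0));     /* 0  */
    return 0;
}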

drivers/vhost/vsock.c

@@ -383,6 +383,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
static struct virtio_transport vhost_transport = {
.transport = {
.get_local_cid = vhost_transport_get_local_cid,
.init = virtio_transport_do_socket_init,
.destruct = virtio_transport_destruct,
.release = virtio_transport_release,
.connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown,
.cancel_pkt = vhost_transport_cancel_pkt,
.dgram_enqueue = virtio_transport_dgram_enqueue,
.dgram_dequeue = virtio_transport_dgram_dequeue,
.dgram_bind = virtio_transport_dgram_bind,
.dgram_allow = virtio_transport_dgram_allow,
.stream_enqueue = virtio_transport_stream_enqueue,
.stream_dequeue = virtio_transport_stream_dequeue,
.stream_has_data = virtio_transport_stream_has_data,
.stream_has_space = virtio_transport_stream_has_space,
.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
.notify_send_init = virtio_transport_notify_send_init,
.notify_send_pre_block = virtio_transport_notify_send_pre_block,
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
.set_buffer_size = virtio_transport_set_buffer_size,
.set_min_buffer_size = virtio_transport_set_min_buffer_size,
.set_max_buffer_size = virtio_transport_set_max_buffer_size,
.get_buffer_size = virtio_transport_get_buffer_size,
.get_min_buffer_size = virtio_transport_get_min_buffer_size,
.get_max_buffer_size = virtio_transport_get_max_buffer_size,
},
.send_pkt = vhost_transport_send_pkt,
};
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -439,7 +485,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
le64_to_cpu(pkt->hdr.dst_cid) ==
vhost_transport_get_local_cid())
virtio_transport_recv_pkt(pkt);
virtio_transport_recv_pkt(&vhost_transport, pkt);
else
virtio_transport_free_pkt(pkt);
@@ -792,52 +838,6 @@ static struct miscdevice vhost_vsock_misc = {
.fops = &vhost_vsock_fops,
};
static struct virtio_transport vhost_transport = {
.transport = {
.get_local_cid = vhost_transport_get_local_cid,
.init = virtio_transport_do_socket_init,
.destruct = virtio_transport_destruct,
.release = virtio_transport_release,
.connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown,
.cancel_pkt = vhost_transport_cancel_pkt,
.dgram_enqueue = virtio_transport_dgram_enqueue,
.dgram_dequeue = virtio_transport_dgram_dequeue,
.dgram_bind = virtio_transport_dgram_bind,
.dgram_allow = virtio_transport_dgram_allow,
.stream_enqueue = virtio_transport_stream_enqueue,
.stream_dequeue = virtio_transport_stream_dequeue,
.stream_has_data = virtio_transport_stream_has_data,
.stream_has_space = virtio_transport_stream_has_space,
.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
.notify_send_init = virtio_transport_notify_send_init,
.notify_send_pre_block = virtio_transport_notify_send_pre_block,
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
.set_buffer_size = virtio_transport_set_buffer_size,
.set_min_buffer_size = virtio_transport_set_min_buffer_size,
.set_max_buffer_size = virtio_transport_set_max_buffer_size,
.get_buffer_size = virtio_transport_get_buffer_size,
.get_min_buffer_size = virtio_transport_get_min_buffer_size,
.get_max_buffer_size = virtio_transport_get_max_buffer_size,
},
.send_pkt = vhost_transport_send_pkt,
};
static int __init vhost_vsock_init(void)
{
int ret;

fs/eventpoll.c

@@ -223,8 +223,7 @@ struct eventpoll {
struct file *file;
/* used to optimize loop detection check */
int visited;
struct list_head visited_list_link;
u64 gen;
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
@@ -274,6 +273,8 @@ static long max_user_watches __read_mostly;
*/
static DEFINE_MUTEX(epmutex);
static u64 loop_check_gen = 0;
/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;
@@ -283,9 +284,6 @@ static struct kmem_cache *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;
/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
static LIST_HEAD(visited_list);
/*
* List of files with newly added links, where we may need to limit the number
* of emanating paths. Protected by the epmutex.
@@ -1379,7 +1377,7 @@ static int reverse_path_check(void)
static int ep_create_wakeup_source(struct epitem *epi)
{
const char *name;
struct name_snapshot n;
struct wakeup_source *ws;
if (!epi->ep->ws) {
@@ -1388,8 +1386,9 @@ static int ep_create_wakeup_source(struct epitem *epi)
return -ENOMEM;
}
name = epi->ffd.file->f_path.dentry->d_name.name;
ws = wakeup_source_register(NULL, name);
take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
ws = wakeup_source_register(NULL, n.name);
release_dentry_name_snapshot(&n);
if (!ws)
return -ENOMEM;
@@ -1451,6 +1450,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
RCU_INIT_POINTER(epi->ws, NULL);
}
/* Add the current item to the list of active epoll hook for this file */
spin_lock(&tfile->f_lock);
list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
ep_rbtree_insert(ep, epi);
/* now check if we've created too many backpaths */
error = -EINVAL;
if (full_check && reverse_path_check())
goto error_remove_epi;
/* Initialize the poll table using the queue callback */
epq.epi = epi;
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1473,22 +1488,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
if (epi->nwait < 0)
goto error_unregister;
/* Add the current item to the list of active epoll hook for this file */
spin_lock(&tfile->f_lock);
list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
ep_rbtree_insert(ep, epi);
/* now check if we've created too many backpaths */
error = -EINVAL;
if (full_check && reverse_path_check())
goto error_remove_epi;
/* We have to drop the new item inside our item list to keep track of it */
spin_lock_irq(&ep->wq.lock);
@@ -1517,6 +1516,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
return 0;
error_unregister:
ep_unregister_pollwait(ep, epi);
error_remove_epi:
spin_lock(&tfile->f_lock);
list_del_rcu(&epi->fllink);
@@ -1524,9 +1525,6 @@ error_remove_epi:
rb_erase_cached(&epi->rbn, &ep->rbr);
error_unregister:
ep_unregister_pollwait(ep, epi);
/*
* We need to do this because an event could have been arrived on some
* allocated wait queue. Note that we don't care about the ep->ovflist
@@ -1870,13 +1868,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
struct epitem *epi;
mutex_lock_nested(&ep->mtx, call_nests + 1);
ep->visited = 1;
list_add(&ep->visited_list_link, &visited_list);
ep->gen = loop_check_gen;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (unlikely(is_file_epoll(epi->ffd.file))) {
ep_tovisit = epi->ffd.file->private_data;
if (ep_tovisit->visited)
if (ep_tovisit->gen == loop_check_gen)
continue;
error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, epi->ffd.file,
@@ -1917,18 +1914,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
*/
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
int ret;
struct eventpoll *ep_cur, *ep_next;
ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, file, ep, current);
/* clear visited list */
list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
visited_list_link) {
ep_cur->visited = 0;
list_del(&ep_cur->visited_list_link);
}
return ret;
}
static void clear_tfile_check_list(void)
@@ -2090,6 +2077,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
mutex_lock_nested(&ep->mtx, 0);
if (op == EPOLL_CTL_ADD) {
if (!list_empty(&f.file->f_ep_links) ||
ep->gen == loop_check_gen ||
is_file_epoll(tf.file)) {
full_check = 1;
mutex_unlock(&ep->mtx);
@@ -2150,6 +2138,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
error_tgt_fput:
if (full_check) {
clear_tfile_check_list();
loop_check_gen++;
mutex_unlock(&epmutex);
}
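
The visited flag and visited_list are replaced by a per-eventpoll generation number compared against the global loop_check_gen: bumping the counter invalidates every mark at once, so no cleanup walk is needed when a check finishes. A self-contained sketch of generation-count visited marks:

#include <stdint.h>
#include <stdio.h>

#define MAX_CHILD 4

static uint64_t loop_check_gen;     /* bumped before each whole scan */

struct node {
    uint64_t gen;                   /* == loop_check_gen when visited */
    int nchild;
    struct node *child[MAX_CHILD];
};

static int count_reachable(struct node *n)
{
    int total = 1;

    if (n->gen == loop_check_gen)
        return 0;                   /* already seen in this scan: skip */
    n->gen = loop_check_gen;
    for (int i = 0; i < n->nchild; i++)
        total += count_reachable(n->child[i]);
    return total;
}

int main(void)
{
    struct node a = { 0 }, b = { 0 };

    a.child[a.nchild++] = &b;
    b.child[b.nchild++] = &a;       /* even a cycle terminates */

    loop_check_gen++;               /* fresh marks in O(1), no cleanup list */
    printf("reachable: %d\n", count_reachable(&a));    /* 2 */

    loop_check_gen++;               /* again: previous marks are now stale */
    printf("reachable: %d\n", count_reachable(&a));    /* 2 */
    return 0;
}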

fs/nfs/dir.c

@@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
do {
if (entry->label)
entry->label->len = NFS4_MAXLABELLEN;
status = xdr_decode(desc, entry, &stream);
if (status != 0) {
if (status == -EAGAIN)

include/linux/mm.h

@@ -2234,7 +2234,7 @@ static inline void zero_resv_unavail(void) {}
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
enum memmap_context, struct vmem_altmap *);
enum meminit_context, struct vmem_altmap *);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);

include/linux/mmzone.h

@@ -779,10 +779,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
/*
* Memory initialization context, use to differentiate memory added by
* the platform statically or via memory hotplug interface.
*/
enum meminit_context {
MEMINIT_EARLY,
MEMINIT_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);

include/linux/node.h

@@ -32,11 +32,13 @@ extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
extern int link_mem_sections(int nid, unsigned long start_pfn,
unsigned long end_pfn);
int link_mem_sections(int nid, unsigned long start_pfn,
unsigned long end_pfn,
enum meminit_context context);
#else
static inline int link_mem_sections(int nid, unsigned long start_pfn,
unsigned long end_pfn)
unsigned long end_pfn,
enum meminit_context context)
{
return 0;
}
@@ -61,7 +63,8 @@ static inline int register_one_node(int nid)
if (error)
return error;
/* link memory sections under this node */
error = link_mem_sections(nid, start_pfn, end_pfn);
error = link_mem_sections(nid, start_pfn, end_pfn,
MEMINIT_EARLY);
}
return error;

include/linux/virtio_vsock.h

@@ -149,7 +149,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_recv_pkt(struct virtio_transport *t,
struct virtio_vsock_pkt *pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);

kernel/trace/ftrace.c

@@ -6372,16 +6372,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
{
int bit;
if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
return;
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
if (bit < 0)
return;
preempt_disable_notrace();
op->func(ip, parent_ip, op, regs);
if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
op->func(ip, parent_ip, op, regs);
preempt_enable_notrace();
trace_clear_recursion(bit);
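
The point of the reordering: the recursion bit is claimed before rcu_is_watching() runs, so if the RCU check itself ends up traced and re-enters the handler, the recursion test cuts it off instead of recursing without bound. A toy illustration of why the guard has to come first, with a thread-local flag standing in for trace_test_and_set_recursion():

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool in_handler;
static int work_done;

static bool safety_check(void);     /* may itself re-enter handler() */

static void handler(void)
{
    if (in_handler)
        return;                     /* guard claimed before any checks */
    in_handler = true;

    if (safety_check())             /* safe even if this recurses */
        work_done++;

    in_handler = false;
}

static bool safety_check(void)
{
    handler();                      /* simulate the check being traced */
    return true;
}

int main(void)
{
    handler();
    printf("work done %d time(s); no runaway recursion\n", work_done);
    return 0;
}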

lib/random32.c

@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
}
#endif
DEFINE_PER_CPU(struct rnd_state, net_rand_state);
DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
/**
* prandom_u32_state - seeded pseudo-random number generator.

mm/memory_hotplug.c

@@ -733,7 +733,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
* are reserved so nobody should be touching them so we should be safe
*/
memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
MEMMAP_HOTPLUG, altmap);
MEMINIT_HOTPLUG, altmap);
set_zone_contiguous(zone);
}
@@ -1102,7 +1102,8 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
}
/* link memory sections under this node.*/
ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
MEMINIT_HOTPLUG);
BUG_ON(ret);
/* create new memmap entry */

mm/page_alloc.c

@@ -5634,7 +5634,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
* done. Non-atomic initialization, single-pass.
*/
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn, enum memmap_context context,
unsigned long start_pfn, enum meminit_context context,
struct vmem_altmap *altmap)
{
unsigned long end_pfn = start_pfn + size;
@@ -5661,7 +5661,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
* There can be holes in boot-time mem_map[]s handed to this
* function. They do not exist on hotplugged memory.
*/
if (context != MEMMAP_EARLY)
if (context != MEMINIT_EARLY)
goto not_early;
if (!early_pfn_valid(pfn))
@@ -5696,7 +5696,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
not_early:
page = pfn_to_page(pfn);
__init_single_page(page, pfn, zone, nid);
if (context == MEMMAP_HOTPLUG)
if (context == MEMINIT_HOTPLUG)
SetPageReserved(page);
/*
@@ -5711,7 +5711,7 @@ not_early:
* check here not to call set_pageblock_migratetype() against
* pfn out of zone.
*
* Please note that MEMMAP_HOTPLUG path doesn't clear memmap
* Please note that MEMINIT_HOTPLUG path doesn't clear memmap
* because this is done early in sparse_add_one_section
*/
if (!(pfn & (pageblock_nr_pages - 1))) {
@@ -5732,7 +5732,8 @@ static void __meminit zone_init_free_lists(struct zone *zone)
#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
memmap_init_zone((size), (nid), (zone), (start_pfn), \
MEMINIT_EARLY, NULL)
#endif
static int zone_batchsize(struct zone *zone)

net/mac80211/vht.c

@@ -170,10 +170,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
/* take some capabilities as-is */
cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
vht_cap->cap = cap_info;
vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_RXLDPC |
vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_VHT_TXOP_PS |
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
@@ -182,6 +179,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);
/* and some based on our own capabilities */
switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:

net/netfilter/nf_conntrack_netlink.c

@@ -1129,6 +1129,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
if (!tb[CTA_TUPLE_IP])
return -EINVAL;
if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
return -EOPNOTSUPP;
tuple->src.l3num = l3num;
err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);

net/packet/af_packet.c

@@ -2162,7 +2162,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
unsigned short macoff, netoff, hdrlen;
unsigned short macoff, hdrlen;
unsigned int netoff;
struct sk_buff *copy_skb = NULL;
struct timespec ts;
__u32 ts_status;
@@ -2225,6 +2226,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
macoff = netoff - maclen;
}
if (netoff > USHRT_MAX) {
spin_lock(&sk->sk_receive_queue.lock);
po->stats.stats1.tp_drops++;
spin_unlock(&sk->sk_receive_queue.lock);
goto drop_n_restore;
}
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
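
netoff was previously accumulated in an unsigned short, so crafted packets whose link-layer headers plus reserve exceeded 65535 bytes silently wrapped the offset; the patch widens the variable and drops anything whose offset no longer fits the frame layout. A short demonstration of the wrap:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned int maclen = 14, hdrlen = 65530, reserve = 64;

    unsigned short netoff_short = hdrlen + maclen + reserve;   /* wraps */
    unsigned int   netoff_wide  = hdrlen + maclen + reserve;

    printf("16-bit netoff: %u (wrapped)\n", (unsigned)netoff_short);
    printf("32-bit netoff: %u\n", netoff_wide);

    if (netoff_wide > USHRT_MAX)
        printf("drop: offset cannot fit the ring frame layout\n");
    return 0;
}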

net/vsock/virtio_transport.c

@@ -39,6 +39,7 @@ struct virtio_vsock {
* must be accessed with tx_lock held.
*/
struct mutex tx_lock;
bool tx_run;
struct work_struct send_pkt_work;
spinlock_t send_pkt_list_lock;
@@ -54,6 +55,7 @@ struct virtio_vsock {
* must be accessed with rx_lock held.
*/
struct mutex rx_lock;
bool rx_run;
int rx_buf_nr;
int rx_buf_max_nr;
@@ -61,46 +63,28 @@
* vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
*/
struct mutex event_lock;
bool event_run;
struct virtio_vsock_event event_list[8];
u32 guest_cid;
};
static struct virtio_vsock *virtio_vsock_get(void)
{
return the_virtio_vsock;
}
static u32 virtio_transport_get_local_cid(void)
{
struct virtio_vsock *vsock = virtio_vsock_get();
struct virtio_vsock *vsock;
u32 ret;
if (!vsock)
return VMADDR_CID_ANY;
return vsock->guest_cid;
}
static void virtio_transport_loopback_work(struct work_struct *work)
{
struct virtio_vsock *vsock =
container_of(work, struct virtio_vsock, loopback_work);
LIST_HEAD(pkts);
spin_lock_bh(&vsock->loopback_list_lock);
list_splice_init(&vsock->loopback_list, &pkts);
spin_unlock_bh(&vsock->loopback_list_lock);
mutex_lock(&vsock->rx_lock);
while (!list_empty(&pkts)) {
struct virtio_vsock_pkt *pkt;
pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
list_del_init(&pkt->list);
virtio_transport_recv_pkt(pkt);
rcu_read_lock();
vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
ret = VMADDR_CID_ANY;
goto out_rcu;
}
mutex_unlock(&vsock->rx_lock);
ret = vsock->guest_cid;
out_rcu:
rcu_read_unlock();
return ret;
}
static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
@@ -128,6 +112,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
mutex_lock(&vsock->tx_lock);
if (!vsock->tx_run)
goto out;
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
@@ -186,6 +173,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
if (added)
virtqueue_kick(vq);
out:
mutex_unlock(&vsock->tx_lock);
if (restart_rx)
@@ -198,14 +186,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
struct virtio_vsock *vsock;
int len = pkt->len;
vsock = virtio_vsock_get();
rcu_read_lock();
vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
virtio_transport_free_pkt(pkt);
return -ENODEV;
len = -ENODEV;
goto out_rcu;
}
if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
return virtio_transport_send_pkt_loopback(vsock, pkt);
if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
len = virtio_transport_send_pkt_loopback(vsock, pkt);
goto out_rcu;
}
if (pkt->reply)
atomic_inc(&vsock->queued_replies);
@ -215,6 +207,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
spin_unlock_bh(&vsock->send_pkt_list_lock);
queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
out_rcu:
rcu_read_unlock();
return len;
}
@@ -223,12 +218,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct virtio_vsock *vsock;
struct virtio_vsock_pkt *pkt, *n;
int cnt = 0;
int cnt = 0, ret;
LIST_HEAD(freeme);
vsock = virtio_vsock_get();
rcu_read_lock();
vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
return -ENODEV;
ret = -ENODEV;
goto out_rcu;
}
spin_lock_bh(&vsock->send_pkt_list_lock);
@@ -256,7 +253,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
return 0;
ret = 0;
out_rcu:
rcu_read_unlock();
return ret;
}
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
@@ -308,6 +309,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
mutex_lock(&vsock->tx_lock);
if (!vsock->tx_run)
goto out;
do {
struct virtio_vsock_pkt *pkt;
unsigned int len;
@@ -318,6 +323,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
added = true;
}
} while (!virtqueue_enable_cb(vq));
out:
mutex_unlock(&vsock->tx_lock);
if (added)
@@ -336,56 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
return val < virtqueue_get_vring_size(vq);
}
static void virtio_transport_rx_work(struct work_struct *work)
{
struct virtio_vsock *vsock =
container_of(work, struct virtio_vsock, rx_work);
struct virtqueue *vq;
vq = vsock->vqs[VSOCK_VQ_RX];
mutex_lock(&vsock->rx_lock);
do {
virtqueue_disable_cb(vq);
for (;;) {
struct virtio_vsock_pkt *pkt;
unsigned int len;
if (!virtio_transport_more_replies(vsock)) {
/* Stop rx until the device processes already
* pending replies. Leave rx virtqueue
* callbacks disabled.
*/
goto out;
}
pkt = virtqueue_get_buf(vq, &len);
if (!pkt) {
break;
}
vsock->rx_buf_nr--;
/* Drop short/long packets */
if (unlikely(len < sizeof(pkt->hdr) ||
len > sizeof(pkt->hdr) + pkt->len)) {
virtio_transport_free_pkt(pkt);
continue;
}
pkt->len = len - sizeof(pkt->hdr);
virtio_transport_deliver_tap_pkt(pkt);
virtio_transport_recv_pkt(pkt);
}
} while (!virtqueue_enable_cb(vq));
out:
if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
virtio_vsock_rx_fill(vsock);
mutex_unlock(&vsock->rx_lock);
}
/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
struct virtio_vsock_event *event)
@@ -455,6 +412,9 @@ static void virtio_transport_event_work(struct work_struct *work)
mutex_lock(&vsock->event_lock);
if (!vsock->event_run)
goto out;
do {
struct virtio_vsock_event *event;
unsigned int len;
@@ -469,7 +429,7 @@ static void virtio_transport_event_work(struct work_struct *work)
} while (!virtqueue_enable_cb(vq));
virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
mutex_unlock(&vsock->event_lock);
}
@@ -546,6 +506,86 @@ static struct virtio_transport virtio_transport = {
.send_pkt = virtio_transport_send_pkt,
};
static void virtio_transport_loopback_work(struct work_struct *work)
{
struct virtio_vsock *vsock =
container_of(work, struct virtio_vsock, loopback_work);
LIST_HEAD(pkts);
spin_lock_bh(&vsock->loopback_list_lock);
list_splice_init(&vsock->loopback_list, &pkts);
spin_unlock_bh(&vsock->loopback_list_lock);
mutex_lock(&vsock->rx_lock);
if (!vsock->rx_run)
goto out;
while (!list_empty(&pkts)) {
struct virtio_vsock_pkt *pkt;
pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
list_del_init(&pkt->list);
virtio_transport_recv_pkt(&virtio_transport, pkt);
}
out:
mutex_unlock(&vsock->rx_lock);
}
static void virtio_transport_rx_work(struct work_struct *work)
{
struct virtio_vsock *vsock =
container_of(work, struct virtio_vsock, rx_work);
struct virtqueue *vq;
vq = vsock->vqs[VSOCK_VQ_RX];
mutex_lock(&vsock->rx_lock);
if (!vsock->rx_run)
goto out;
do {
virtqueue_disable_cb(vq);
for (;;) {
struct virtio_vsock_pkt *pkt;
unsigned int len;
if (!virtio_transport_more_replies(vsock)) {
/* Stop rx until the device processes already
* pending replies. Leave rx virtqueue
* callbacks disabled.
*/
goto out;
}
pkt = virtqueue_get_buf(vq, &len);
if (!pkt) {
break;
}
vsock->rx_buf_nr--;
/* Drop short/long packets */
if (unlikely(len < sizeof(pkt->hdr) ||
len > sizeof(pkt->hdr) + pkt->len)) {
virtio_transport_free_pkt(pkt);
continue;
}
pkt->len = len - sizeof(pkt->hdr);
virtio_transport_deliver_tap_pkt(pkt);
virtio_transport_recv_pkt(&virtio_transport, pkt);
}
} while (!virtqueue_enable_cb(vq));
out:
if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
virtio_vsock_rx_fill(vsock);
mutex_unlock(&vsock->rx_lock);
}
static int virtio_vsock_probe(struct virtio_device *vdev)
{
vq_callback_t *callbacks[] = {
@@ -566,7 +606,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
return ret;
/* Only one virtio-vsock device per guest is supported */
if (the_virtio_vsock) {
if (rcu_dereference_protected(the_virtio_vsock,
lockdep_is_held(&the_virtio_vsock_mutex))) {
ret = -EBUSY;
goto out;
}
@@ -591,8 +632,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
vdev->priv = vsock;
the_virtio_vsock = vsock;
mutex_init(&vsock->tx_lock);
mutex_init(&vsock->rx_lock);
mutex_init(&vsock->event_lock);
@@ -606,14 +645,23 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
mutex_lock(&vsock->tx_lock);
vsock->tx_run = true;
mutex_unlock(&vsock->tx_lock);
mutex_lock(&vsock->rx_lock);
virtio_vsock_rx_fill(vsock);
vsock->rx_run = true;
mutex_unlock(&vsock->rx_lock);
mutex_lock(&vsock->event_lock);
virtio_vsock_event_fill(vsock);
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
vdev->priv = vsock;
rcu_assign_pointer(the_virtio_vsock, vsock);
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
@@ -628,6 +676,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
struct virtio_vsock *vsock = vdev->priv;
struct virtio_vsock_pkt *pkt;
mutex_lock(&the_virtio_vsock_mutex);
vdev->priv = NULL;
rcu_assign_pointer(the_virtio_vsock, NULL);
synchronize_rcu();
flush_work(&vsock->loopback_work);
flush_work(&vsock->rx_work);
flush_work(&vsock->tx_work);
@@ -637,6 +691,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Reset all connected sockets when the device disappear */
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
/* Stop all work handlers to make sure no one is accessing the device,
* so we can safely call vdev->config->reset().
*/
mutex_lock(&vsock->rx_lock);
vsock->rx_run = false;
mutex_unlock(&vsock->rx_lock);
mutex_lock(&vsock->tx_lock);
vsock->tx_run = false;
mutex_unlock(&vsock->tx_lock);
mutex_lock(&vsock->event_lock);
vsock->event_run = false;
mutex_unlock(&vsock->event_lock);
/* Flush all device writes and interrupts, device will not use any
* more buffers.
*/
vdev->config->reset(vdev);
mutex_lock(&vsock->rx_lock);
@@ -667,12 +739,11 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
}
spin_unlock_bh(&vsock->loopback_list_lock);
mutex_lock(&the_virtio_vsock_mutex);
the_virtio_vsock = NULL;
mutex_unlock(&the_virtio_vsock_mutex);
/* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
mutex_unlock(&the_virtio_vsock_mutex);
kfree(vsock);
}
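
The series turns the_virtio_vsock into an RCU-managed pointer: readers only touch it between rcu_read_lock()/rcu_read_unlock() after rcu_dereference(), while .remove() publishes NULL and waits in synchronize_rcu() before tearing the device down, closing the use-after-free. A minimal userspace analog of that lifecycle, assuming the userspace-rcu library (liburcu; build with -lurcu):

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct vsock_state { int guest_cid; };

static struct vsock_state *the_state;   /* RCU-protected pointer */

static int get_cid(void)
{
    struct vsock_state *s;
    int cid = -1;                       /* analog of VMADDR_CID_ANY */

    rcu_read_lock();
    s = rcu_dereference(the_state);
    if (s)
        cid = s->guest_cid;             /* cannot be freed while we read */
    rcu_read_unlock();
    return cid;
}

int main(void)
{
    struct vsock_state *s = malloc(sizeof(*s));

    s->guest_cid = 3;
    rcu_register_thread();

    rcu_assign_pointer(the_state, s);   /* publish, as probe() does */
    printf("cid while live: %d\n", get_cid());

    rcu_assign_pointer(the_state, NULL); /* retire, as remove() does */
    synchronize_rcu();                  /* wait out current readers */
    free(s);                            /* now provably unreachable */

    printf("cid after remove: %d\n", get_cid());
    rcu_unregister_thread();
    return 0;
}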

net/vsock/virtio_transport_common.c

@@ -669,9 +669,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
/* Normally packets are associated with a socket. There may be no socket if an
* attempt was made to connect to a socket that does not exist.
*/
static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
struct virtio_vsock_pkt *pkt)
{
const struct virtio_transport *t;
struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
@@ -691,7 +691,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (!reply)
return -ENOMEM;
t = virtio_transport_get_ops();
if (!t) {
virtio_transport_free_pkt(reply);
return -ENOTCONN;
@@ -993,7 +992,8 @@ static bool virtio_transport_space_update(struct sock *sk,
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
* lock.
*/
void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
void virtio_transport_recv_pkt(struct virtio_transport *t,
struct virtio_vsock_pkt *pkt)
{
struct sockaddr_vm src, dst;
struct vsock_sock *vsk;
@@ -1015,7 +1015,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
le32_to_cpu(pkt->hdr.fwd_cnt));
if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
(void)virtio_transport_reset_no_sock(pkt);
(void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
@@ -1026,7 +1026,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
if (!sk) {
sk = vsock_find_bound_socket(&dst);
if (!sk) {
(void)virtio_transport_reset_no_sock(pkt);
(void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
}
@@ -1060,6 +1060,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
virtio_transport_free_pkt(pkt);
break;
default:
(void)virtio_transport_reset_no_sock(t, pkt);
virtio_transport_free_pkt(pkt);
break;
}