This is the 4.19.232 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmIfSSAACgkQONu9yGCS
 aT5czA//bnvOgPI7cMkzMAx+9aHIk4WaleBwUbEO475IY2NbbJCDsLOYQ5s+FgtB
 AyXSsFyUjy6Ka+mvnqRXsLSshbkfCE8tGP0cUFs/0N6Nr84s9t+1frUWprG6p2A2
 mjEboEwLRuaxfVormh6G2aqv1E4Ira8D3DSbz6MOnCoAqbRCN+k1CgwJOSImSKb8
 TIvQHo3Eqc+jiRE4Och6aKkOCJYAPtIqYtA1sy9LuZ36sVdBpFlWdCpX0drQ3SYV
 frXP6zXO++cNmAOtgSDVfmht05P5Qc3ZHDm/O0Bwl7BMW7RqU81DgmkmZnK5iKk/
 R0iWf+e6CpfiZCrVErbk/hwmE89Ig/60YOwLg5UqfYKOMFG/88b9awlPvygvRDac
 /NpSvbP924GwKUccmF0+p0RFbxMpg6ZG4JXP5FZAzCkfqX//SZScxSTQeeQ5FBCH
 LAY6TmMi9MicOEaGdlrR6sIpvhMRJ5kkw0C9Q31JybUkczB6zT9ci+GRiohi58cG
 Gom+8i2nCcKV38YwmjO/Z9eL24cYSM2NbzEy7Yf3Sz0Nb5UEUAJSEZ0vN196dm+R
 WTDZXLLsdBHAsubS2R0t1NB7KvQYZIT/xP/49NDoXOmOSYI9aJ4gAhpuLX9HsHAD
 wFR41aGuefnSm7dunOTmDMFIWurUb74TSxk5Iv6Ni2ZawmV4NNM=
 =EQLE
 -----END PGP SIGNATURE-----

Merge 4.19.232 into android-4.19-stable

Changes in 4.19.232
	cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug
	vhost/vsock: don't check owner in vhost_vsock_stop() while releasing
	parisc/unaligned: Fix fldd and fstd unaligned handlers on 32-bit kernel
	parisc/unaligned: Fix ldw() and stw() unalignment handlers
	sr9700: sanity check for packet length
	USB: zaurus: support another broken Zaurus
	ping: remove pr_err from ping_lookup
	net: __pskb_pull_tail() & pskb_carve_frag_list() drop_monitor friends
	tipc: Fix end of loop tests for list_for_each_entry()
	gso: do not skip outer ip header in case of ipip and net_failover
	openvswitch: Fix setting ipv6 fields causing hw csum failure
	drm/edid: Always set RGB444
	net/mlx5e: Fix wrong return value on ioctl EEPROM query failure
	configfs: fix a race in configfs_{,un}register_subsystem()
	RDMA/ib_srp: Fix a deadlock
	tty: n_gsm: fix proper link termination after failed open
	gpio: tegra186: Fix chip_data type confusion
	Revert "drm/nouveau/pmu/gm200-: avoid touching PMU outside of DEVINIT/PREOS/ACR"
	memblock: use kfree() to release kmalloced memblock regions
	fget: clarify and improve __fget_files() implementation
	tracing: Have traceon and traceoff trigger honor the instance
	iio: adc: men_z188_adc: Fix a resource leak in an error handling path
	ata: pata_hpt37x: disable primary channel on HPT371
	Revert "USB: serial: ch341: add new Product ID for CH341A"
	usb: gadget: rndis: add spinlock for rndis response list
	USB: gadget: validate endpoint index for xilinx udc
	tracefs: Set the group ownership in apply_options() not parse_options()
	USB: serial: option: add support for DW5829e
	USB: serial: option: add Telit LE910R1 compositions
	usb: dwc3: pci: Fix Bay Trail phy GPIO mappings
	usb: dwc3: gadget: Let the interrupt handler disable bottom halves.
	xhci: re-initialize the HC during resume if HCE was set
	xhci: Prevent futile URB re-submissions due to incorrect return value.
	tty: n_gsm: fix encoding of control signal octet bit DV
	Linux 4.19.232

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7604e43e0d2cc019835f0508e0a0e5116d72bc85
commit b33f661bba
Author: Greg Kroah-Hartman
Date:   2022-03-02 15:29:10 +01:00
36 changed files with 336 additions and 100 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 231
SUBLEVEL = 232
EXTRAVERSION =
NAME = "People's Front"


@ -354,7 +354,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
return 0;
return ret;
}
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
@ -411,7 +411,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
" zdep %2, 29, 2, %%r19\n"
" dep %%r0, 31, 2, %2\n"
" dep %%r0, 31, 2, %3\n"
" mtsar %%r19\n"
" zvdepi -2, 32, %%r19\n"
"1: ldw 0(%%sr1,%3),%%r20\n"
@ -423,7 +423,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
" andcm %%r21, %%r19, %%r21\n"
" or %1, %%r20, %1\n"
" or %2, %%r21, %2\n"
"3: stw %1,0(%%sr1,%1)\n"
"3: stw %1,0(%%sr1,%3)\n"
"4: stw %%r1,4(%%sr1,%3)\n"
"5: stw %2,8(%%sr1,%3)\n"
" copy %%r0, %0\n"
@ -610,7 +610,6 @@ void handle_unaligned(struct pt_regs *regs)
ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
break;
}
#ifdef CONFIG_PA20
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:
@ -621,22 +620,23 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
}
#endif
}
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
flop=1;
ret = emulate_ldw(regs, R2(regs->iir),0);
ret = emulate_ldw(regs, R2(regs->iir), 1);
break;
case OPCODE_LDW_M:
ret = emulate_ldw(regs, R2(regs->iir),1);
ret = emulate_ldw(regs, R2(regs->iir), 0);
break;
case OPCODE_FSTW_L:


@ -916,6 +916,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
irqmask &= ~0x10;
pci_write_config_byte(dev, 0x5a, irqmask);
/*
* HPT371 chips physically have only one channel, the secondary one,
* but the primary channel registers do exist! Go figure...
* So, we manually disable the non-existing channel here
* (if the BIOS hasn't done this already).
*/
if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
u8 mcr1;
pci_read_config_byte(dev, 0x50, &mcr1);
mcr1 &= ~0x04;
pci_write_config_byte(dev, 0x50, mcr1);
}
/*
* default to pci clock. make sure MA15/16 are set to output
* to prevent drives having problems with 40-pin cables. Needed


@ -237,9 +237,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
return offset + pin;
}
#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio)
static void tegra186_irq_ack(struct irq_data *data)
{
struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
base = tegra186_gpio_get_base(gpio, data->hwirq);
@ -251,7 +254,8 @@ static void tegra186_irq_ack(struct irq_data *data)
static void tegra186_irq_mask(struct irq_data *data)
{
struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@ -266,7 +270,8 @@ static void tegra186_irq_mask(struct irq_data *data)
static void tegra186_irq_unmask(struct irq_data *data)
{
struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@ -281,7 +286,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
static int tegra186_irq_set_type(struct irq_data *data, unsigned int flow)
{
struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;


@ -4899,6 +4899,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return quirks;
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, edid);
/*
@ -4952,7 +4953,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->name, info->bpc);
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)


@ -70,13 +70,20 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
return 0;
}
static void
static int
nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
if (!pmu->func->enabled(pmu))
return;
return 0;
/* Inhibit interrupts, and wait for idle. */
nvkm_wr32(device, 0x10a014, 0x0000ffff);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, 0x10a04c))
break;
);
/* Reset. */
if (pmu->func->reset)
@ -87,37 +94,25 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
break;
);
return 0;
}
static int
nvkm_pmu_preinit(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
nvkm_pmu_reset(pmu);
return 0;
return nvkm_pmu_reset(pmu);
}
static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
struct nvkm_device *device = pmu->subdev.device;
if (!pmu->func->init)
return 0;
if (pmu->func->enabled(pmu)) {
/* Inhibit interrupts, and wait for idle. */
nvkm_wr32(device, 0x10a014, 0x0000ffff);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, 0x10a04c))
break;
);
nvkm_pmu_reset(pmu);
}
return pmu->func->init(pmu);
int ret = nvkm_pmu_reset(pmu);
if (ret == 0 && pmu->func->init)
ret = pmu->func->init(pmu);
return ret;
}
static int


@ -106,6 +106,7 @@ static int men_z188_probe(struct mcb_device *dev,
struct z188_adc *adc;
struct iio_dev *indio_dev;
struct resource *mem;
int ret;
indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
if (!indio_dev)
@ -132,8 +133,14 @@ static int men_z188_probe(struct mcb_device *dev,
adc->mem = mem;
mcb_set_drvdata(dev, indio_dev);
return iio_device_register(indio_dev);
ret = iio_device_register(indio_dev);
if (ret)
goto err_unmap;
return 0;
err_unmap:
iounmap(adc->base);
err:
mcb_release_mem(mem);
return -ENXIO;


@ -4154,9 +4154,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
spin_unlock(&host->target_lock);
/*
* Wait for tl_err and target port removal tasks.
* srp_queue_remove_work() queues a call to
* srp_remove_target(). The latter function cancels
* target->tl_err_work so waiting for the remove works to
* finish is sufficient.
*/
flush_workqueue(system_long_wq);
flush_workqueue(srp_remove_wq);
kfree(host);


@ -1357,7 +1357,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
__func__, size_read);
return 0;
return size_read;
}
i += size_read;


@ -584,6 +584,11 @@ static const struct usb_device_id products[] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
#define ZAURUS_FAKE_INTERFACE \
.bInterfaceClass = USB_CLASS_COMM, \
.bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
* wire-incompatible with true CDC Ethernet implementations.
* (And, it seems, needlessly so...)
@ -637,6 +642,13 @@ static const struct usb_device_id products[] = {
.idProduct = 0x9032, /* SL-6000 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x9032, /* SL-6000 */
ZAURUS_FAKE_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,


@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (len > ETH_FRAME_LEN)
if (len > ETH_FRAME_LEN || len > skb->len)
return 0;
/* the last packet of current skb */


@ -268,6 +268,11 @@ static const struct usb_device_id products [] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
#define ZAURUS_FAKE_INTERFACE \
.bInterfaceClass = USB_CLASS_COMM, \
.bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
{
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@ -325,6 +330,13 @@ static const struct usb_device_id products [] = {
.idProduct = 0x9032, /* SL-6000 */
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x9032, /* SL-6000 */
ZAURUS_FAKE_INTERFACE,
.driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,


@ -428,7 +428,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
modembits |= MDM_RTR;
if (dlci->modem_tx & TIOCM_RI)
modembits |= MDM_IC;
if (dlci->modem_tx & TIOCM_CD)
if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
modembits |= MDM_DV;
return modembits;
}
@ -1490,7 +1490,7 @@ static void gsm_dlci_t1(struct timer_list *t)
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
gsm_dlci_close(dlci);
gsm_dlci_begin_close(dlci); /* prevent half open link */
}
break;


@ -76,8 +76,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
static struct gpiod_lookup_table platform_bytcr_gpios = {
.dev_id = "0000:00:16.0",
.table = {
GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
{}
},
};


@ -3181,9 +3181,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
local_bh_disable();
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_process_event_buf(evt);
spin_unlock_irqrestore(&dwc->lock, flags);
local_bh_enable();
return ret;
}


@ -922,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
params->resp_avail = resp_avail;
params->v = v;
INIT_LIST_HEAD(&params->resp_queue);
spin_lock_init(&params->resp_lock);
pr_debug("%s: configNr = %d\n", __func__, i);
return params;
@ -1015,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
{
rndis_resp_t *r, *n;
spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (r->buf == buf) {
list_del(&r->list);
kfree(r);
}
}
spin_unlock(&params->resp_lock);
}
EXPORT_SYMBOL_GPL(rndis_free_response);
@ -1030,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
if (!length) return NULL;
spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (!r->send) {
r->send = 1;
*length = r->length;
spin_unlock(&params->resp_lock);
return r->buf;
}
}
spin_unlock(&params->resp_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(rndis_get_next_response);
@ -1054,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
r->length = length;
r->send = 0;
spin_lock(&params->resp_lock);
list_add_tail(&r->list, &params->resp_queue);
spin_unlock(&params->resp_lock);
return r;
}


@ -174,6 +174,7 @@ typedef struct rndis_params {
void (*resp_avail)(void *v);
void *v;
struct list_head resp_queue;
spinlock_t resp_lock;
} rndis_params;
/* RNDIS Message parser and other useless functions */
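Taken together, the rndis.c and rndis.h hunks above close a race on resp_queue: responses are added from the control-request path and consumed or freed from the response-available path, so the list now carries its own spinlock. A condensed, self-contained sketch of that pattern follows; the struct and function names here are illustrative, not the driver's own.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct resp {
	struct list_head list;
	u32 length;
};

struct resp_params {
	struct list_head resp_queue;
	spinlock_t resp_lock;		/* guards resp_queue */
};

static void resp_params_init(struct resp_params *p)
{
	INIT_LIST_HEAD(&p->resp_queue);
	spin_lock_init(&p->resp_lock);
}

static int queue_response(struct resp_params *p, u32 length)
{
	struct resp *r = kzalloc(sizeof(*r), GFP_ATOMIC);

	if (!r)
		return -ENOMEM;
	r->length = length;
	spin_lock(&p->resp_lock);
	list_add_tail(&r->list, &p->resp_queue);
	spin_unlock(&p->resp_lock);
	return 0;
}

static void free_all_responses(struct resp_params *p)
{
	struct resp *r, *n;

	spin_lock(&p->resp_lock);
	list_for_each_entry_safe(r, n, &p->resp_queue, list) {
		list_del(&r->list);
		kfree(r);
	}
	spin_unlock(&p->resp_lock);
}

Every reader and writer of the list takes the same lock, matching the four hunks above (init, add, lookup, free).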


@ -1613,6 +1613,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
break;
case USB_RECIP_ENDPOINT:
epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
if (epnum >= XUSB_MAX_ENDPOINTS)
goto stall;
target_ep = &udc->ep[epnum];
epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
@ -1680,6 +1682,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
case USB_RECIP_ENDPOINT:
if (!udc->setup.wValue) {
endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
if (endpoint >= XUSB_MAX_ENDPOINTS) {
xudc_ep0_stall(udc);
return;
}
target_ep = &udc->ep[endpoint];
outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
outinbit = outinbit >> 7;


@ -1083,6 +1083,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
bool reinit_xhc = false;
if (!hcd->state)
return 0;
@ -1099,10 +1100,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
hibernated = true;
if (!hibernated) {
if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
reinit_xhc = true;
if (!reinit_xhc) {
/*
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
@ -1135,12 +1137,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
temp = readl(&xhci->op_regs->status);
}
/* If restore operation fails, re-initialize the HC during resume */
if ((temp & STS_SRE) || hibernated) {
temp = readl(&xhci->op_regs->status);
/* re-initialize the HC on Restore Error, or Host Controller Error */
if (temp & (STS_SRE | STS_HCE)) {
reinit_xhc = true;
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
}
if (reinit_xhc) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
@ -1457,9 +1464,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct urb_priv *urb_priv;
int num_tds;
if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
true, true, __func__) <= 0)
if (!urb)
return -EINVAL;
ret = xhci_check_args(hcd, urb->dev, urb->ep,
true, true, __func__);
if (ret <= 0)
return ret ? ret : -EINVAL;
slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
@ -3203,7 +3213,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
return -EINVAL;
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
return -EINVAL;
return ret ? ret : -EINVAL;
if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",


@ -80,7 +80,6 @@
#define CH341_LCR_CS5 0x00
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1a86, 0x5512) },
{ USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },


@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5821E 0x81d7
#define DELL_PRODUCT_5821E_ESIM 0x81e0
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
.driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },


@ -569,16 +569,18 @@ err:
return ret;
}
static int vhost_vsock_stop(struct vhost_vsock *vsock)
static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
size_t i;
int ret;
int ret = 0;
mutex_lock(&vsock->dev.mutex);
ret = vhost_dev_check_owner(&vsock->dev);
if (ret)
goto err;
if (check_owner) {
ret = vhost_dev_check_owner(&vsock->dev);
if (ret)
goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
struct vhost_virtqueue *vq = &vsock->vqs[i];
@ -693,7 +695,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
* inefficient. Room for improvement here. */
vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
vhost_vsock_stop(vsock);
/* Don't check the owner, because we are in the release path, so we
* need to stop the vsock device in any case.
* vhost_vsock_stop() can not fail in this case, so we don't need to
* check the return code.
*/
vhost_vsock_stop(vsock, false);
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);
@ -791,7 +798,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (start)
return vhost_vsock_start(vsock);
else
return vhost_vsock_stop(vsock);
return vhost_vsock_stop(vsock, true);
case VHOST_GET_FEATURES:
features = VHOST_VSOCK_FEATURES;
if (copy_to_user(argp, &features, sizeof(features)))


@ -50,6 +50,14 @@ DECLARE_RWSEM(configfs_rename_sem);
*/
DEFINE_SPINLOCK(configfs_dirent_lock);
/*
* All of link_obj/unlink_obj/link_group/unlink_group require that
* subsys->su_mutex is held.
* But parent configfs_subsystem is NULL when config_item is root.
* Use this mutex when config_item is root.
*/
static DEFINE_MUTEX(configfs_subsystem_mutex);
static void configfs_d_iput(struct dentry * dentry,
struct inode * inode)
{
@ -1937,7 +1945,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
sd = root->d_fsdata;
mutex_lock(&configfs_subsystem_mutex);
link_group(to_config_group(sd->s_element), group);
mutex_unlock(&configfs_subsystem_mutex);
inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
@ -1962,7 +1972,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
inode_unlock(d_inode(root));
if (err) {
mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
put_fragment(frag);
@ -2008,7 +2020,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
dput(dentry);
mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}


@ -677,28 +677,69 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
static inline struct file *__fget_files_rcu(struct files_struct *files,
unsigned int fd, fmode_t mask, unsigned int refs)
{
for (;;) {
struct file *file;
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
struct file __rcu **fdentry;
if (unlikely(fd >= fdt->max_fds))
return NULL;
fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
file = rcu_dereference_raw(*fdentry);
if (unlikely(!file))
return NULL;
if (unlikely(file->f_mode & mask))
return NULL;
/*
* Ok, we have a file pointer. However, because we do
* this all locklessly under RCU, we may be racing with
* that file being closed.
*
* Such a race can take two forms:
*
* (a) the file ref already went down to zero,
* and get_file_rcu_many() fails. Just try
* again:
*/
if (unlikely(!get_file_rcu_many(file, refs)))
continue;
/*
* (b) the file table entry has changed under us.
* Note that we don't need to re-check the 'fdt->fd'
* pointer having changed, because it always goes
* hand-in-hand with 'fdt'.
*
* If so, we need to put our refs and try again.
*/
if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
unlikely(rcu_dereference_raw(*fdentry) != file)) {
fput_many(file, refs);
continue;
}
/*
* Ok, we have a ref to the file, and checked that it
* still exists.
*/
return file;
}
}
static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
{
struct files_struct *files = current->files;
struct file *file;
rcu_read_lock();
loop:
file = fcheck_files(files, fd);
if (file) {
/* File object ref couldn't be taken.
* dup2() atomicity guarantee is the reason
* we loop to catch the new file (or NULL pointer)
*/
if (file->f_mode & mask)
file = NULL;
else if (!get_file_rcu_many(file, refs))
goto loop;
else if (__fcheck_files(files, fd) != file) {
fput_many(file, refs);
goto loop;
}
}
file = __fget_files_rcu(files, fd, mask, refs);
rcu_read_unlock();
return file;


@ -265,7 +265,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
if (!gid_valid(gid))
return -EINVAL;
opts->gid = gid;
set_gid(tracefs_mount->mnt_root, gid);
break;
case Opt_mode:
if (match_octal(&args[0], &option))
@ -292,7 +291,9 @@ static int tracefs_apply_options(struct super_block *sb)
inode->i_mode |= opts->mode;
inode->i_uid = opts->uid;
inode->i_gid = opts->gid;
/* Set all the group ids to the mount option */
set_gid(sb->s_root, opts->gid);
return 0;
}


@ -143,6 +143,11 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
{
*csum = csum_add(csum_sub(*csum, old), new);
}
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
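The csum_replace() helper added above has no caller in this header; it is used by the openvswitch hunks further down to keep skb->csum valid on CHECKSUM_COMPLETE packets while IPv6 header fields are rewritten. A hedged usage sketch (the function name is hypothetical, mirroring the set_ipv6_ttl() change in that later diff):

#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Illustrative only: remove the old field value from the accumulated
 * sum and add the new one, so a CHECKSUM_COMPLETE skb->csum still
 * matches the packet after the header byte is rewritten.
 */
static void example_set_hop_limit(struct sk_buff *skb, struct ipv6hdr *nh,
				  u8 new_ttl)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum,
			     (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}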


@ -1537,6 +1537,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
cpus_read_lock();
mutex_lock(&cpuset_mutex);
/* prepare for attach */
@ -1592,6 +1593,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
/* The various types of files and directories in a cpuset file system */


@ -933,6 +933,16 @@ static void
traceon_trigger(struct event_trigger_data *data, void *rec,
struct ring_buffer_event *event)
{
struct trace_event_file *file = data->private_data;
if (file) {
if (tracer_tracing_is_on(file->tr))
return;
tracer_tracing_on(file->tr);
return;
}
if (tracing_is_on())
return;
@ -943,8 +953,15 @@ static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
struct ring_buffer_event *event)
{
if (tracing_is_on())
return;
struct trace_event_file *file = data->private_data;
if (file) {
if (tracer_tracing_is_on(file->tr))
return;
} else {
if (tracing_is_on())
return;
}
if (!data->count)
return;
@ -952,13 +969,26 @@ traceon_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
tracing_on();
if (file)
tracer_tracing_on(file->tr);
else
tracing_on();
}
static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
struct ring_buffer_event *event)
{
struct trace_event_file *file = data->private_data;
if (file) {
if (!tracer_tracing_is_on(file->tr))
return;
tracer_tracing_off(file->tr);
return;
}
if (!tracing_is_on())
return;
@ -969,8 +999,15 @@ static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
struct ring_buffer_event *event)
{
if (!tracing_is_on())
return;
struct trace_event_file *file = data->private_data;
if (file) {
if (!tracer_tracing_is_on(file->tr))
return;
} else {
if (!tracing_is_on())
return;
}
if (!data->count)
return;
@ -978,7 +1015,10 @@ traceoff_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
tracing_off();
if (file)
tracer_tracing_off(file->tr);
else
tracing_off();
}
static int


@ -323,14 +323,20 @@ void __init memblock_discard(void)
addr = __pa(memblock.reserved.regions);
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.reserved.max);
__memblock_free_late(addr, size);
if (memblock_reserved_in_slab)
kfree(memblock.reserved.regions);
else
__memblock_free_late(addr, size);
}
if (memblock.memory.regions != memblock_memory_init_regions) {
addr = __pa(memblock.memory.regions);
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.memory.max);
__memblock_free_late(addr, size);
if (memblock_memory_in_slab)
kfree(memblock.memory.regions);
else
__memblock_free_late(addr, size);
}
}
#endif
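The conditional freeing above is needed because the region arrays change owners over boot: they begin life as static arrays, and memblock_double_array() reallocates them with kmalloc() once slab_is_available(), recording that in the memblock_*_in_slab flags. A minimal sketch of the same ownership-tracking pattern, with hypothetical names (tracked_buf, grow_buf(), release_buf() are not kernel APIs):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct tracked_buf {
	void *data;
	size_t size;
	bool in_slab;	/* true once data came from kmalloc() */
};

static int grow_buf(struct tracked_buf *b, size_t new_size)
{
	void *p = kmalloc(new_size, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	memcpy(p, b->data, b->size);
	b->data = p;
	b->size = new_size;
	b->in_slab = true;	/* record the owning allocator */
	return 0;
}

static void release_buf(struct tracked_buf *b)
{
	if (b->in_slab)
		kfree(b->data);
	/* else the buffer lives in storage owned elsewhere, as memblock
	 * handles via __memblock_free_late() in the hunk above */
	b->data = NULL;
}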


@ -1977,7 +1977,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Free pulled out fragments. */
while ((list = skb_shinfo(skb)->frag_list) != insp) {
skb_shinfo(skb)->frag_list = list->next;
kfree_skb(list);
consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {
@ -5482,7 +5482,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
/* Free pulled out fragments. */
while ((list = shinfo->frag_list) != insp) {
shinfo->frag_list = list->next;
kfree_skb(list);
consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {


@ -1338,8 +1338,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
}
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment))
if (likely(ops && ops->callbacks.gso_segment)) {
segs = ops->callbacks.gso_segment(skb, features);
if (!segs)
skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
}
if (IS_ERR_OR_NULL(segs))
goto out;


@ -192,7 +192,6 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
(int)ident, &ipv6_hdr(skb)->daddr, dif);
#endif
} else {
pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
return NULL;
}


@ -98,6 +98,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (likely(ops && ops->callbacks.gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->callbacks.gso_segment(skb, features);
if (!segs)
skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
}
if (IS_ERR_OR_NULL(segs))


@ -493,12 +493,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
if (skb->ip_summed == CHECKSUM_COMPLETE)
csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
(__force __wsum)(ipv6_tclass << 12));
ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}
static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
u32 ofl;
ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
fl = OVS_MASKED(ofl, fl, mask);
/* Bits 21-24 are always unmasked, so this retains their values. */
OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
nh->flow_lbl[0] = (u8)(fl >> 16);
nh->flow_lbl[1] = (u8)(fl >> 8);
nh->flow_lbl[2] = (u8)fl;
if (skb->ip_summed == CHECKSUM_COMPLETE)
csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}
static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
if (skb->ip_summed == CHECKSUM_COMPLETE)
csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
(__force __wsum)(new_ttl << 8));
nh->hop_limit = new_ttl;
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
@ -616,18 +647,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
}
}
if (mask->ipv6_tclass) {
ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
flow_key->ip.tos = ipv6_get_dsfield(nh);
}
if (mask->ipv6_label) {
set_ipv6_fl(nh, ntohl(key->ipv6_label),
set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
ntohl(mask->ipv6_label));
flow_key->ipv6.label =
*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
}
if (mask->ipv6_hlimit) {
OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
mask->ipv6_hlimit);
set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
flow_key->ip.ttl = nh->hop_limit;
}
return 0;


@ -812,7 +812,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
list_for_each_entry(p, &sr->all_publ, all_publ)
if (p->key == *last_key)
break;
if (p->key != *last_key)
if (list_entry_is_head(p, &sr->all_publ, all_publ))
return -EPIPE;
} else {
p = list_first_entry(&sr->all_publ,


@ -3487,7 +3487,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
if (p->key == *last_publ)
break;
}
if (p->key != *last_publ) {
if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
/* We never set seq or call nl_dump_check_consistent()
* this means that setting prev_seq here will cause the
* consistence check to fail in the netlink callback
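Both tipc hunks above replace a field comparison after list_for_each_entry() with list_entry_is_head(). When the loop runs to completion without hitting the break, the cursor ends up pointing at the list head cast to the entry type, so dereferencing a member (the old p->key test) reads memory that is not a real entry. A hedged sketch of the idiom, with hypothetical names:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head node;
	u32 key;
};

/* Return the entry matching @key, or NULL if the walk fell off the end
 * of the list; testing the cursor against the head is safe, while
 * dereferencing it after a completed loop is not.
 */
static struct item *find_item(struct list_head *head, u32 key)
{
	struct item *it;

	list_for_each_entry(it, head, node)
		if (it->key == key)
			break;

	if (list_entry_is_head(it, head, node))
		return NULL;
	return it;
}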