Merge android-4.19.20 (c28f73f) into msm-4.19

* refs/heads/tmp-c28f73f:
  Linux 4.19.20
  cifs: Always resolve hostname before reconnecting
  md/raid5: fix 'out of memory' during raid cache recovery
  of: overlay: do not duplicate properties from overlay for new nodes
  of: overlay: use prop add changeset entry for property in new nodes
  of: overlay: add missing of_node_get() in __of_attach_node_sysfs
  of: overlay: add tests to validate kfrees from overlay removal
  of: Convert to using %pOFn instead of device_node.name
  mm: migrate: don't rely on __PageMovable() of newpage after unlocking it
  mm: hwpoison: use do_send_sig_info() instead of force_sig()
  mm, oom: fix use-after-free in oom_kill_process
  mm,memory_hotplug: fix scan_movable_pages() for gigantic hugepages
  oom, oom_reaper: do not enqueue same task twice
  mm/hugetlb.c: teach follow_hugetlb_page() to handle FOLL_NOWAIT
  kernel/exit.c: release ptraced tasks before zap_pid_ns_processes
  btrfs: On error always free subvol_name in btrfs_mount
  Btrfs: fix deadlock when allocating tree block during leaf/node split
  mmc: sdhci-iproc: handle mmc_of_parse() errors during probe
  platform/x86: asus-nb-wmi: Drop mapping of 0x33 and 0x34 scan codes
  platform/x86: asus-nb-wmi: Map 0x35 to KEY_SCREENLOCK
  IB/hfi1: Remove overly conservative VM_EXEC flag check
  ALSA: hda/realtek - Fixed hp_pin no value
  ALSA: usb-audio: Add Opus #3 to quirks for native DSD support
  mmc: mediatek: fix incorrect register setting of hs400_cmd_int_delay
  mmc: bcm2835: Fix DMA channel leak on probe error
  gfs2: Revert "Fix loop in gfs2_rbm_find"
  gpio: sprd: Fix incorrect irq type setting for the async EIC
  gpio: sprd: Fix the incorrect data register
  gpio: pcf857x: Fix interrupts on multiple instances
  gpiolib: fix line event timestamps for nested irqs
  gpio: altera-a10sr: Set proper output level for direction_output
  arm64: hibernate: Clean the __hyp_text to PoC after resume
  arm64: hyp-stub: Forbid kprobing of the hyp-stub
  arm64: Do not issue IPIs for user executable ptes
  arm64: kaslr: ensure randomized quantities are clean also when kaslr is off
  ARM: cns3xxx: Fix writing to wrong PCI config registers after alignment
  NFS: Fix up return value on fatal errors in nfs_page_async_flush()
  selftests/seccomp: Enhance per-arch ptrace syscall skip tests
  iommu/vt-d: Fix memory leak in intel_iommu_put_resv_regions()
  fs/dcache: Fix incorrect nr_dentry_unused accounting in shrink_dcache_sb()
  CIFS: Do not consider -ENODATA as stat failure for reads
  CIFS: Fix trace command logging for SMB2 reads and writes
  CIFS: Do not count -ENODATA as failure for query directory
  virtio_net: Differentiate sk_buff and xdp_frame on freeing
  virtio_net: Use xdp_return_frame to free xdp_frames on destroying vqs
  virtio_net: Don't process redirected XDP frames when XDP is disabled
  virtio_net: Fix out of bounds access of sq
  virtio_net: Fix not restoring real_num_rx_queues
  virtio_net: Don't call free_old_xmit_skbs for xdp_frames
  virtio_net: Don't enable NAPI when interface is down
  sctp: set flow sport from saddr only when it's 0
  sctp: set chunk transport correctly when it's a new asoc
  Revert "net/mlx5e: E-Switch, Initialize eswitch only if eswitch manager"
  ip6mr: Fix notifiers call on mroute_clean_tables()
  net/mlx5e: Allow MAC invalidation while spoofchk is ON
  sctp: improve the events for sctp stream adding
  net: ip6_gre: always reports o_key to userspace
  vhost: fix OOB in get_rx_bufs()
  ucc_geth: Reset BQL queue when stopping device
  tun: move the call to tun_set_real_num_queues
  sctp: improve the events for sctp stream reset
  ravb: expand rx descriptor data to accommodate hw checksum
  net: set default network namespace in init_dummy_netdev()
  net/rose: fix NULL ax25_cb kernel panic
  netrom: switch to sock timer API
  net/mlx4_core: Add masking for a few queries on HCA caps
  net: ip_gre: use erspan key field for tunnel lookup
  net: ip_gre: always reports o_key to userspace
  l2tp: fix reading optional fields of L2TPv3
  l2tp: copy 4 more bytes to linear part if necessary
  ipvlan, l3mdev: fix broken l3s mode wrt local routes
  ipv6: sr: clear IP6CB(skb) on SRH ip4ip6 encapsulation
  ipv6: Consider sk_bound_dev_if when binding a socket to an address
  drm/msm/gpu: fix building without debugfs
  Fix "net: ipv4: do not handle duplicate fragments as overlapping"
  UPSTREAM: net: dev_is_mac_header_xmit() true for ARPHRD_RAWIP
  UPSTREAM: binder: filter out nodes when showing binder procs
  UPSTREAM: xfrm: Make set-mark default behavior backward compatible
  ANDROID: cuttlefish_defconfig: Enable CONFIG_RTC_HCTOSYS

Conflicts:
	mm/oom_kill.c

Change-Id: I95452a9f286b95924096afd2dfe439f9d638d404
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 19
SUBLEVEL = 20
EXTRAVERSION =
NAME = "People's Front"


@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
} else /* remote PCI bus */
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
return base + (where & 0xffc) + (devfn << 12);
return base + where + (devfn << 12);
}
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,


@ -369,7 +369,6 @@ CONFIG_MMC=y
# CONFIG_PWRSEQ_SIMPLE is not set
# CONFIG_MMC_BLOCK is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
# CONFIG_RTC_SYSTOHC is not set
CONFIG_RTC_DRV_PL031=y
CONFIG_VIRTIO_PCI=y


@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
dcache_clean_range(__idmap_text_start, __idmap_text_end);
/* Clean kvm setup code to PoC? */
if (el2_reset_needed())
if (el2_reset_needed()) {
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
dcache_clean_range(__hyp_text_start, __hyp_text_end);
}
/* make the crash dump kernel image protected again */
crash_post_resume();


@ -28,6 +28,8 @@
#include <asm/virt.h>
.text
.pushsection .hyp.text, "ax"
.align 11
ENTRY(__hyp_stub_vectors)


@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,


@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
__clean_dcache_area_pou(kaddr, len);
__flush_icache_all();
} else {
flush_icache_range(addr, addr + len);
/*
* Don't issue kick_all_cpus_sync() after I-cache invalidation
* for user mappings.
*/
__flush_icache_range(addr, addr + len);
}
}


@ -382,7 +382,6 @@ CONFIG_MMC=y
# CONFIG_PWRSEQ_SIMPLE is not set
# CONFIG_MMC_BLOCK is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_SW_SYNC=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y


@ -5474,6 +5474,9 @@ static void print_binder_proc(struct seq_file *m,
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
if (!print_all && !node->has_async_transaction)
continue;
/*
* take a temporary reference on the node so it
* survives and isn't removed from the tree


@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
unsigned int nr, int value)
{
if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
altr_a10sr_gpio_set(gc, nr, value);
return 0;
}
return -EINVAL;
}


@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
{
return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
switch (sprd_eic->type) {
case SPRD_EIC_DEBOUNCE:
return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
case SPRD_EIC_ASYNC:
return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
case SPRD_EIC_SYNC:
return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
default:
return -ENOTSUPP;
}
}
static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;


@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
*/
struct pcf857x {
struct gpio_chip chip;
struct irq_chip irqchip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
unsigned out; /* software latch */
@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&gpio->lock);
}
static struct irq_chip pcf857x_irq_chip = {
.name = "pcf857x",
.irq_enable = pcf857x_irq_enable,
.irq_disable = pcf857x_irq_disable,
.irq_ack = noop,
.irq_mask = noop,
.irq_unmask = noop,
.irq_set_wake = pcf857x_irq_set_wake,
.irq_bus_lock = pcf857x_irq_bus_lock,
.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
};
/*-------------------------------------------------------------------------*/
static int pcf857x_probe(struct i2c_client *client,
@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
/* Enable irqchip if we have an interrupt */
if (client->irq) {
gpio->irqchip.name = "pcf857x",
gpio->irqchip.irq_enable = pcf857x_irq_enable,
gpio->irqchip.irq_disable = pcf857x_irq_disable,
gpio->irqchip.irq_ack = noop,
gpio->irqchip.irq_mask = noop,
gpio->irqchip.irq_unmask = noop,
gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
status = gpiochip_irqchip_add_nested(&gpio->chip,
&pcf857x_irq_chip,
&gpio->irqchip,
0, handle_level_irq,
IRQ_TYPE_NONE);
if (status) {
@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
if (status)
goto fail;
gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
client->irq);
gpio->irq_parent = client->irq;
}


@ -817,7 +817,15 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
/* Do not leak kernel stack to userspace */
memset(&ge, 0, sizeof(ge));
ge.timestamp = le->timestamp;
/*
* We may be running from a nested threaded interrupt in which case
* we didn't get the timestamp from lineevent_irq_handler().
*/
if (!le->timestamp)
ge.timestamp = ktime_get_real_ns();
else
ge.timestamp = le->timestamp;
level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
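[Illustration only, not part of the patch: the fix above reduces to "use the timestamp if the top-half handler recorded one, otherwise sample the clock now". A minimal userspace sketch of that fallback, with illustrative names and CLOCK_REALTIME standing in for ktime_get_real_ns():]

#include <stdint.h>
#include <time.h>

/* If the hard-IRQ handler recorded a timestamp, use it; otherwise we are
 * running from a nested threaded interrupt and must sample the clock here. */
static uint64_t event_timestamp_ns(uint64_t recorded_ns)
{
	struct timespec ts;

	if (recorded_ns)
		return recorded_ns;
	clock_gettime(CLOCK_REALTIME, &ts);	/* stand-in for ktime_get_real_ns() */
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}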


@ -63,7 +63,7 @@ struct msm_gpu_funcs {
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);


@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vmf = 1;
break;
case STATUS:
if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
if (flags & VM_WRITE) {
ret = -EPERM;
goto done;
}


@ -5230,7 +5230,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list) {
if (entry->type == IOMMU_RESV_RESERVED)
if (entry->type == IOMMU_RESV_MSI)
kfree(entry);
}
}


@ -1935,12 +1935,14 @@ out:
}
static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
sector_t stripe_sect)
r5c_recovery_alloc_stripe(
struct r5conf *conf,
sector_t stripe_sect,
int noblock)
{
struct stripe_head *sh;
sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
if (!sh)
return NULL; /* no more stripe available */
@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
stripe_sect);
if (!sh) {
sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
/*
* cannot get stripe from raid5_get_active_stripe
* try replay some stripes
@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
r5c_recovery_replay_stripes(
cached_stripe_list, ctx);
sh = r5c_recovery_alloc_stripe(
conf, stripe_sect);
conf, stripe_sect, 1);
}
if (!sh) {
int new_size = conf->min_nr_stripes * 2;
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
mdname(mddev),
conf->min_nr_stripes * 2);
raid5_set_cache_size(mddev,
conf->min_nr_stripes * 2);
sh = r5c_recovery_alloc_stripe(conf,
stripe_sect);
new_size);
ret = raid5_set_cache_size(mddev, new_size);
if (conf->min_nr_stripes <= new_size / 2) {
pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
mdname(mddev),
ret,
new_size,
conf->min_nr_stripes,
conf->max_nr_stripes);
return -ENOMEM;
}
sh = r5c_recovery_alloc_stripe(
conf, stripe_sect, 0);
}
if (!sh) {
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
mdname(mddev));
mdname(mddev));
return -ENOMEM;
}
list_add_tail(&sh->lru, cached_stripe_list);


@ -6357,6 +6357,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
int
raid5_set_cache_size(struct mddev *mddev, int size)
{
int result = 0;
struct r5conf *conf = mddev->private;
if (size <= 16 || size > 32768)
@ -6373,11 +6374,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
if (!grow_one_stripe(conf, GFP_KERNEL))
if (!grow_one_stripe(conf, GFP_KERNEL)) {
conf->min_nr_stripes = conf->max_nr_stripes;
result = -ENOMEM;
break;
}
mutex_unlock(&conf->cache_size_mutex);
return 0;
return result;
}
EXPORT_SYMBOL(raid5_set_cache_size);


@ -1427,6 +1427,8 @@ static int bcm2835_probe(struct platform_device *pdev)
err:
dev_dbg(dev, "%s -> err %d\n", __func__, ret);
if (host->dma_chan_rxtx)
dma_release_channel(host->dma_chan_rxtx);
mmc_free_host(mmc);
return ret;


@ -784,7 +784,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (timing == MMC_TIMING_MMC_HS400 &&
host->dev_comp->hs400_tune)
sdr_set_field(host->base + PAD_CMD_TUNE,
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);


@ -279,7 +279,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
iproc_host->data = iproc_data;
mmc_of_parse(host->mmc);
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
sdhci_get_of_property(pdev);
host->mmc->caps |= iproc_host->data->mmc_caps;


@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
u16 i, j;
u8 __iomem *bd;
netdev_reset_queue(ugeth->ndev);
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;


@ -2064,9 +2064,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
u64 qword_field;
u32 dword_field;
int err;
u16 word_field;
u8 byte_field;
int err;
static const u8 a0_dmfs_query_hw_steering[] = {
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@ -2094,19 +2096,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
param->qpc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
param->log_num_qps = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
param->srqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
param->log_num_srqs = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
param->cqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
param->log_num_cqs = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
param->altc_base = qword_field;
MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
param->auxc_base = qword_field;
MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
param->eqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
param->log_num_eqs = byte_field & 0x1f;
MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
param->num_sys_eqs = word_field & 0xfff;
MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
param->rdmarc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
param->log_rd_per_qp = byte_field & 0x7;
MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@ -2125,22 +2140,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
MLX4_GET(param->log_mc_table_sz, outbox,
INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
MLX4_GET(byte_field, outbox,
INIT_HCA_FS_A0_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
param->log_mc_entry_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
param->log_mc_table_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
param->dmfs_high_steer_mode =
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
MLX4_GET(param->log_mc_hash_sz, outbox,
INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
MLX4_GET(param->log_mc_table_sz, outbox,
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
param->log_mc_entry_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
param->log_mc_hash_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
param->log_mc_table_sz = byte_field & 0x1f;
}
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@ -2164,15 +2178,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
param->mw_enabled = byte_field >> 7;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
param->log_mpt_sz = byte_field & 0x3f;
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
/* UAR attributes */
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
param->log_uar_sz = byte_field & 0xf;
/* phv_check enable */
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);


@ -1133,13 +1133,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
int err = 0;
u8 *smac_v;
if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
mlx5_core_warn(esw->dev,
"vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
vport->vport);
return -EPERM;
}
esw_vport_cleanup_ingress_rules(esw, vport);
if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@ -1696,7 +1689,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
int vport_num;
int err;
if (!MLX5_ESWITCH_MANAGER(dev))
if (!MLX5_VPORT_MANAGER(dev))
return 0;
esw_info(dev,
@ -1765,7 +1758,7 @@ abort:
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
return;
esw_info(esw->dev, "cleanup\n");
@ -1812,13 +1805,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev,
"MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
"Set invalid MAC while spoofchk is on, vport(%d)\n",
vport);
err = -EPERM;
goto unlock;
}
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
@ -1964,6 +1954,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
evport = &esw->vports[vport];
pschk = evport->info.spoofchk;
evport->info.spoofchk = spoofchk;
if (pschk && !is_valid_ether_addr(evport->info.mac))
mlx5_core_warn(esw->dev,
"Spoofchk in set while MAC is invalid, vport(%d)\n",
evport->vport);
if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
if (err)


@ -344,7 +344,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int i;
priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
ETH_HLEN + VLAN_HLEN;
ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@ -525,13 +525,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
/* The hardware checksum is 2 bytes appended to packet data */
if (unlikely(skb->len < 2))
/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
* appended to packet data
*/
if (unlikely(skb->len < sizeof(__sum16)))
return;
hw_csum = skb_tail_pointer(skb) - 2;
hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
skb->ip_summed = CHECKSUM_COMPLETE;
skb_trim(skb, skb->len - 2);
skb_trim(skb, skb->len - sizeof(__sum16));
}
/* Packet receive function for Ethernet AVB */
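[Aside, a hedged sketch rather than driver code: the hunk above works because the EtherAVB hardware appends a 2-byte little-endian checksum after the payload, so the driver peels it off the tail. The same arithmetic in plain C, with get_unaligned_le16() open-coded:]

#include <stddef.h>
#include <stdint.h>

/* Given a buffer of total_len bytes whose last two bytes are a little-endian
 * checksum, extract the checksum and return the payload length, or -1 if the
 * buffer is too short (mirrors the skb->len < sizeof(__sum16) check). */
static long split_trailing_csum(const uint8_t *buf, size_t total_len,
				uint16_t *csum)
{
	const uint8_t *tail;

	if (total_len < sizeof(uint16_t))
		return -1;
	tail = buf + total_len - sizeof(uint16_t);	/* skb_tail_pointer() - 2 */
	*csum = (uint16_t)tail[0] | ((uint16_t)tail[1] << 8);
	return (long)(total_len - sizeof(uint16_t));	/* length after skb_trim() */
}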


@ -97,12 +97,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
if (!err) {
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
mdev->priv_flags |= IFF_L3MDEV_MASTER;
mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
} else
goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
mdev->l3mdev_ops = NULL;
}
@ -162,7 +162,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct sk_buff *skb;
if (port->mode == IPVLAN_MODE_L3S) {
dev->priv_flags &= ~IFF_L3MDEV_MASTER;
dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(dev_net(dev));
dev->l3mdev_ops = NULL;
}


@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
tun_napi_init(tun, tfile, napi, napi_frags);
}
tun_set_real_num_queues(tun);
/* device is allowed to go away first, so no need to hold extra
* refcnt.
*/
@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
tun_set_real_num_queues(tun);
out:
return err;
}


@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)
#define VIRTIO_XDP_FLAG BIT(0)
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@ -251,6 +253,21 @@ struct padded_vnet_hdr {
char padding[4];
};
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}
static void *xdp_to_ptr(struct xdp_frame *ptr)
{
return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}
static struct xdp_frame *ptr_to_xdp(void *ptr)
{
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
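[The three helpers above rely on a trick worth spelling out: heap objects are at least pointer-aligned, so bit 0 of a valid pointer is always zero and can be borrowed as a type tag — here letting sk_buff and xdp_frame pointers share one virtqueue token. A standalone sketch of the idiom, with illustrative names:]

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define TYPE_TAG 0x1UL	/* bit 0 is free: allocations are word-aligned */

static void *tag_ptr(void *p)   { return (void *)((uintptr_t)p | TYPE_TAG); }
static int is_tagged(void *p)   { return (uintptr_t)p & TYPE_TAG; }
static void *untag_ptr(void *p) { return (void *)((uintptr_t)p & ~TYPE_TAG); }

int main(void)
{
	int *obj = malloc(sizeof(*obj));

	assert(((uintptr_t)obj & TYPE_TAG) == 0);	/* alignment guarantee */
	void *token = tag_ptr(obj);	/* producer stores a "type B" pointer */
	if (is_tagged(token))		/* consumer recovers type, then pointer */
		*(int *)untag_ptr(token) = 42;
	free(obj);
	return 0;
}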
@ -461,7 +478,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
sg_init_one(sq->sg, xdpf->data, xdpf->len);
err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
GFP_ATOMIC);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */
@ -481,15 +499,22 @@ static int virtnet_xdp_xmit(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
struct receive_queue *rq = vi->rq;
struct xdp_frame *xdpf_sent;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
int drops = 0;
int kicks = 0;
int ret, err;
void *ptr;
int i;
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
if (!xdp_prog)
return -ENXIO;
sq = virtnet_xdp_sq(vi);
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
@ -498,19 +523,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
goto out;
}
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
if (!xdp_prog) {
ret = -ENXIO;
drops = n;
goto out;
}
/* Free up any pending old buffers before queueing new ones. */
while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
xdp_return_frame(xdpf_sent);
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
if (likely(is_xdp_frame(ptr)))
xdp_return_frame(ptr_to_xdp(ptr));
else
napi_consume_skb(ptr, false);
}
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@ -1329,20 +1348,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return stats.packets;
}
static void free_old_xmit_skbs(struct send_queue *sq)
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
struct sk_buff *skb;
unsigned int len;
unsigned int packets = 0;
unsigned int bytes = 0;
void *ptr;
while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
if (likely(!is_xdp_frame(ptr))) {
struct sk_buff *skb = ptr;
bytes += skb->len;
pr_debug("Sent skb %p\n", skb);
bytes += skb->len;
napi_consume_skb(skb, in_napi);
} else {
struct xdp_frame *frame = ptr_to_xdp(ptr);
bytes += frame->len;
xdp_return_frame(frame);
}
packets++;
dev_consume_skb_any(skb);
}
/* Avoid overhead when no packets have been processed
@ -1357,6 +1384,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
u64_stats_update_end(&sq->stats.syncp);
}
static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
else if (q < vi->curr_queue_pairs)
return true;
else
return false;
}
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
@ -1364,11 +1401,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
struct send_queue *sq = &vi->sq[index];
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
if (!sq->napi.weight)
if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
return;
if (__netif_tx_trylock(txq)) {
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
}
@ -1441,10 +1478,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
struct send_queue *sq = container_of(napi, struct send_queue, napi);
struct virtnet_info *vi = sq->vq->vdev->priv;
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
unsigned int index = vq2txq(sq->vq);
struct netdev_queue *txq;
if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
/* We don't need to enable cb for XDP */
napi_complete_done(napi, 0);
return 0;
}
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
virtqueue_napi_complete(napi, sq->vq, 0);
@ -1513,7 +1558,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, false);
if (use_napi && kick)
virtqueue_enable_cb_delayed(sq->vq);
@ -1556,7 +1601,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!use_napi &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
@ -2345,6 +2390,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
return -ENOMEM;
}
old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
if (!prog && !old_prog)
return 0;
if (prog) {
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
if (IS_ERR(prog))
@ -2352,36 +2401,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev))
for (i = 0; i < vi->max_queue_pairs; i++)
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
}
if (!prog) {
for (i = 0; i < vi->max_queue_pairs; i++) {
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0)
virtnet_restore_guest_offloads(vi);
}
synchronize_net();
}
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
vi->xdp_queue_pairs = xdp_qp;
for (i = 0; i < vi->max_queue_pairs; i++) {
old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0) {
if (!old_prog)
if (prog) {
for (i = 0; i < vi->max_queue_pairs; i++) {
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
if (!prog)
virtnet_restore_guest_offloads(vi);
}
}
for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
if (netif_running(dev))
if (netif_running(dev)) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq,
&vi->sq[i].napi);
}
}
return 0;
err:
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
if (!prog) {
virtnet_clear_guest_offloads(vi);
for (i = 0; i < vi->max_queue_pairs; i++)
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
}
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq,
&vi->sq[i].napi);
}
}
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
return err;
@ -2537,16 +2612,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
else if (q < vi->curr_queue_pairs)
return true;
else
return false;
}
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@ -2555,10 +2620,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
if (!is_xdp_raw_buffer_queue(vi, i))
if (!is_xdp_frame(buf))
dev_kfree_skb(buf);
else
put_page(virt_to_head_page(buf));
xdp_return_frame(ptr_to_xdp(buf));
}
}


@ -221,7 +221,8 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len
return -ENODEV;
/* Name & Type */
csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
/* %p eats all alphanum characters, so %c must be used here */
csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
dev->of_node->type);
tsize = csize;
len -= csize;
@ -300,7 +301,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
if ((!dev) || (!dev->of_node))
return;
add_uevent_var(env, "OF_NAME=%s", dev->of_node->name);
add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
if (dev->of_node->type && strcmp("<NULL>", dev->of_node->type) != 0)
add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type);


@ -275,9 +275,6 @@ void __of_detach_node(struct device_node *np)
/**
* of_detach_node() - "Unplug" a node from the device tree.
*
* The caller must hold a reference to the node. The memory associated with
* the node is not freed until its refcount goes to zero.
*/
int of_detach_node(struct device_node *np)
{
@ -333,6 +330,25 @@ void of_node_release(struct kobject *kobj)
if (!of_node_check_flag(node, OF_DYNAMIC))
return;
if (of_node_check_flag(node, OF_OVERLAY)) {
if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
/* premature refcount of zero, do not free memory */
pr_err("ERROR: memory leak before free overlay changeset, %pOF\n",
node);
return;
}
/*
* If node->properties non-empty then properties were added
* to this node either by different overlay that has not
* yet been removed, or by a non-overlay mechanism.
*/
if (node->properties)
pr_err("ERROR: %s(), unexpected properties in %pOF\n",
__func__, node);
}
property_list_free(node->properties);
property_list_free(node->deadprops);
@ -437,6 +453,16 @@ struct device_node *__of_node_dup(const struct device_node *np,
static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
{
if (ce->action == OF_RECONFIG_ATTACH_NODE &&
of_node_check_flag(ce->np, OF_OVERLAY)) {
if (kref_read(&ce->np->kobj.kref) > 1) {
pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n",
kref_read(&ce->np->kobj.kref), ce->np);
} else {
of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
}
}
of_node_put(ce->np);
list_del(&ce->node);
kfree(ce);


@ -133,6 +133,9 @@ int __of_attach_node_sysfs(struct device_node *np)
}
if (!name)
return -ENOMEM;
of_node_get(np);
rc = kobject_add(&np->kobj, parent, "%s", name);
kfree(name);
if (rc)
@ -159,6 +162,5 @@ void __of_detach_node_sysfs(struct device_node *np)
kobject_del(&np->kobj);
}
/* finally remove the kobj_init ref */
of_node_put(np);
}


@ -97,8 +97,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
return rc;
}
dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
child->name, addr);
dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
child, addr);
return 0;
}
@ -127,8 +127,8 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
return rc;
}
dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
child->name, addr);
dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n",
child, addr);
return 0;
}
@ -263,8 +263,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
continue;
/* be noisy to encourage people to set reg property */
dev_info(&mdio->dev, "scan phy %s at address %i\n",
child->name, addr);
dev_info(&mdio->dev, "scan phy %pOFn at address %i\n",
child, addr);
if (of_mdiobus_child_is_phy(child)) {
rc = of_mdiobus_register_phy(mdio, child, addr);


@ -168,8 +168,8 @@ int of_node_to_nid(struct device_node *device)
np = of_get_next_parent(np);
}
if (np && r)
pr_warn("Invalid \"numa-node-id\" property in node %s\n",
np->name);
pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
np);
of_node_put(np);
/*


@ -23,6 +23,26 @@
#include "of_private.h"
/**
* struct target - info about current target node as recursing through overlay
* @np: node where current level of overlay will be applied
* @in_livetree: @np is a node in the live devicetree
*
* Used in the algorithm to create the portion of a changeset that describes
* an overlay fragment, which is a devicetree subtree. Initially @np is a node
* in the live devicetree where the overlay subtree is targeted to be grafted
* into. When recursing to the next level of the overlay subtree, the target
* also recurses to the next level of the live devicetree, as long as overlay
* subtree node also exists in the live devicetree. When a node in the overlay
* subtree does not exist at the same level in the live devicetree, target->np
* points to a newly allocated node, and all subsequent targets in the subtree
* will be newly allocated nodes.
*/
struct target {
struct device_node *np;
bool in_livetree;
};
/**
* struct fragment - info about fragment nodes in overlay expanded device tree
* @target: target of the overlay operation
@ -72,8 +92,7 @@ static int devicetree_corrupt(void)
}
static int build_changeset_next_level(struct overlay_changeset *ovcs,
struct device_node *target_node,
const struct device_node *overlay_node);
struct target *target, const struct device_node *overlay_node);
/*
* of_resolve_phandles() finds the largest phandle in the live tree.
@ -257,14 +276,17 @@ err_free_target_path:
/**
* add_changeset_property() - add @overlay_prop to overlay changeset
* @ovcs: overlay changeset
* @target_node: where to place @overlay_prop in live tree
* @target: where @overlay_prop will be placed
* @overlay_prop: property to add or update, from overlay tree
* @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__"
*
* If @overlay_prop does not already exist in @target_node, add changeset entry
* to add @overlay_prop in @target_node, else add changeset entry to update
* If @overlay_prop does not already exist in live devicetree, add changeset
* entry to add @overlay_prop in @target, else add changeset entry to update
* value of @overlay_prop.
*
* @target may be either in the live devicetree or in a new subtree that
* is contained in the changeset.
*
* Some special properties are not updated (no error returned).
*
* Update of property in symbols node is not allowed.
@ -273,20 +295,22 @@ err_free_target_path:
* invalid @overlay.
*/
static int add_changeset_property(struct overlay_changeset *ovcs,
struct device_node *target_node,
struct property *overlay_prop,
struct target *target, struct property *overlay_prop,
bool is_symbols_prop)
{
struct property *new_prop = NULL, *prop;
int ret = 0;
prop = of_find_property(target_node, overlay_prop->name, NULL);
if (!of_prop_cmp(overlay_prop->name, "name") ||
!of_prop_cmp(overlay_prop->name, "phandle") ||
!of_prop_cmp(overlay_prop->name, "linux,phandle"))
return 0;
if (target->in_livetree)
prop = of_find_property(target->np, overlay_prop->name, NULL);
else
prop = NULL;
if (is_symbols_prop) {
if (prop)
return -EINVAL;
@ -299,10 +323,10 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
return -ENOMEM;
if (!prop)
ret = of_changeset_add_property(&ovcs->cset, target_node,
ret = of_changeset_add_property(&ovcs->cset, target->np,
new_prop);
else
ret = of_changeset_update_property(&ovcs->cset, target_node,
ret = of_changeset_update_property(&ovcs->cset, target->np,
new_prop);
if (ret) {
@ -315,14 +339,14 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
/**
* add_changeset_node() - add @node (and children) to overlay changeset
* @ovcs: overlay changeset
* @target_node: where to place @node in live tree
* @node: node from within overlay device tree fragment
* @ovcs: overlay changeset
* @target: where @node will be placed in live tree or changeset
* @node: node from within overlay device tree fragment
*
* If @node does not already exist in @target_node, add changeset entry
* to add @node in @target_node.
* If @node does not already exist in @target, add changeset entry
* to add @node in @target.
*
* If @node already exists in @target_node, and the existing node has
* If @node already exists in @target, and the existing node has
* a phandle, the overlay node is not allowed to have a phandle.
*
* If @node has child nodes, add the children recursively via
@ -355,38 +379,46 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
* invalid @overlay.
*/
static int add_changeset_node(struct overlay_changeset *ovcs,
struct device_node *target_node, struct device_node *node)
struct target *target, struct device_node *node)
{
const char *node_kbasename;
struct device_node *tchild;
struct target target_child;
int ret = 0;
node_kbasename = kbasename(node->full_name);
for_each_child_of_node(target_node, tchild)
for_each_child_of_node(target->np, tchild)
if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
break;
if (!tchild) {
tchild = __of_node_dup(node, node_kbasename);
tchild = __of_node_dup(NULL, node_kbasename);
if (!tchild)
return -ENOMEM;
tchild->parent = target_node;
tchild->parent = target->np;
of_node_set_flag(tchild, OF_OVERLAY);
ret = of_changeset_attach_node(&ovcs->cset, tchild);
if (ret)
return ret;
ret = build_changeset_next_level(ovcs, tchild, node);
target_child.np = tchild;
target_child.in_livetree = false;
ret = build_changeset_next_level(ovcs, &target_child, node);
of_node_put(tchild);
return ret;
}
if (node->phandle && tchild->phandle)
if (node->phandle && tchild->phandle) {
ret = -EINVAL;
else
ret = build_changeset_next_level(ovcs, tchild, node);
} else {
target_child.np = tchild;
target_child.in_livetree = target->in_livetree;
ret = build_changeset_next_level(ovcs, &target_child, node);
}
of_node_put(tchild);
return ret;
@ -395,7 +427,7 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
/**
* build_changeset_next_level() - add level of overlay changeset
* @ovcs: overlay changeset
* @target_node: where to place @overlay_node in live tree
* @target: where to place @overlay_node in live tree
* @overlay_node: node from within an overlay device tree fragment
*
* Add the properties (if any) and nodes (if any) from @overlay_node to the
@ -408,27 +440,26 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
* invalid @overlay_node.
*/
static int build_changeset_next_level(struct overlay_changeset *ovcs,
struct device_node *target_node,
const struct device_node *overlay_node)
struct target *target, const struct device_node *overlay_node)
{
struct device_node *child;
struct property *prop;
int ret;
for_each_property_of_node(overlay_node, prop) {
ret = add_changeset_property(ovcs, target_node, prop, 0);
ret = add_changeset_property(ovcs, target, prop, 0);
if (ret) {
pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
target_node, prop->name, ret);
target->np, prop->name, ret);
return ret;
}
}
for_each_child_of_node(overlay_node, child) {
ret = add_changeset_node(ovcs, target_node, child);
ret = add_changeset_node(ovcs, target, child);
if (ret) {
pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
target_node, child->name, ret);
pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
target->np, child, ret);
of_node_put(child);
return ret;
}
@ -441,17 +472,17 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
* Add the properties from __overlay__ node to the @ovcs->cset changeset.
*/
static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
struct device_node *target_node,
struct target *target,
const struct device_node *overlay_symbols_node)
{
struct property *prop;
int ret;
for_each_property_of_node(overlay_symbols_node, prop) {
ret = add_changeset_property(ovcs, target_node, prop, 1);
ret = add_changeset_property(ovcs, target, prop, 1);
if (ret) {
pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
target_node, prop->name, ret);
target->np, prop->name, ret);
return ret;
}
}
@ -474,6 +505,7 @@ static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
static int build_changeset(struct overlay_changeset *ovcs)
{
struct fragment *fragment;
struct target target;
int fragments_count, i, ret;
/*
@ -488,7 +520,9 @@ static int build_changeset(struct overlay_changeset *ovcs)
for (i = 0; i < fragments_count; i++) {
fragment = &ovcs->fragments[i];
ret = build_changeset_next_level(ovcs, fragment->target,
target.np = fragment->target;
target.in_livetree = true;
ret = build_changeset_next_level(ovcs, &target,
fragment->overlay);
if (ret) {
pr_debug("apply failed '%pOF'\n", fragment->target);
@ -498,7 +532,10 @@ static int build_changeset(struct overlay_changeset *ovcs)
if (ovcs->symbols_fragment) {
fragment = &ovcs->fragments[ovcs->count - 1];
ret = build_changeset_symbols_node(ovcs, fragment->target,
target.np = fragment->target;
target.in_livetree = true;
ret = build_changeset_symbols_node(ovcs, &target,
fragment->overlay);
if (ret) {
pr_debug("apply failed '%pOF'\n", fragment->target);
@ -516,7 +553,7 @@ static int build_changeset(struct overlay_changeset *ovcs)
* 1) "target" property containing the phandle of the target
* 2) "target-path" property containing the path of the target
*/
static struct device_node *find_target_node(struct device_node *info_node)
static struct device_node *find_target(struct device_node *info_node)
{
struct device_node *node;
const char *path;
@ -622,7 +659,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
fragment = &fragments[cnt];
fragment->overlay = overlay_node;
fragment->target = find_target_node(node);
fragment->target = find_target(node);
if (!fragment->target) {
of_node_put(fragment->overlay);
ret = -EINVAL;


@ -92,8 +92,8 @@ static void of_device_make_bus_id(struct device *dev)
*/
reg = of_get_property(node, "reg", NULL);
if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s",
(unsigned long long)addr, node->name,
dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
(unsigned long long)addr, node,
dev_name(dev));
return;
}
@ -143,8 +143,8 @@ struct platform_device *of_device_alloc(struct device_node *np,
WARN_ON(rc);
}
if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
pr_debug("not all legacy IRQ resources mapped for %s\n",
np->name);
pr_debug("not all legacy IRQ resources mapped for %pOFn\n",
np);
}
dev->dev.of_node = of_node_get(np);


@ -212,8 +212,8 @@ static int __init of_unittest_check_node_linkage(struct device_node *np)
for_each_child_of_node(np, child) {
if (child->parent != np) {
pr_err("Child node %s links to wrong parent %s\n",
child->name, np->name);
pr_err("Child node %pOFn links to wrong parent %pOFn\n",
child, np);
rc = -EINVAL;
goto put_child;
}
@ -1046,16 +1046,16 @@ static void __init of_unittest_platform_populate(void)
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(of_find_device_by_node(grandchild),
"Could not create device for node '%s'\n",
grandchild->name);
"Could not create device for node '%pOFn'\n",
grandchild);
}
of_platform_depopulate(&test_bus->dev);
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(!of_find_device_by_node(grandchild),
"device didn't get destroyed '%s'\n",
grandchild->name);
"device didn't get destroyed '%pOFn'\n",
grandchild);
}
platform_device_unregister(test_bus);


@ -442,8 +442,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
{ KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
{ KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
{ KE_KEY, 0x35, { KEY_SCREENLOCK } },
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */


@ -1114,7 +1114,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].rx_ring = NULL;
vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
UIO_MAXIOV + VHOST_NET_BATCH);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);


@ -1398,7 +1398,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
vhost_scsi_init_inflight(vs, NULL);


@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
vq->indirect = kmalloc_array(UIO_MAXIOV,
sizeof(*vq->indirect),
GFP_KERNEL);
vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
GFP_KERNEL);
vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
GFP_KERNEL);
if (!vq->indirect || !vq->log || !vq->heads)
goto err_nomem;
@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
}
void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs)
struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
{
struct vhost_virtqueue *vq;
int i;
@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->iotlb = NULL;
dev->mm = NULL;
dev->worker = NULL;
dev->iov_limit = iov_limit;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);


@ -170,9 +170,11 @@ struct vhost_dev {
struct list_head read_list;
struct list_head pending_list;
wait_queue_head_t wait;
int iov_limit;
};
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
int nvqs, int iov_limit);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);


@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);


@ -1003,6 +1003,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
return 0;
}
static struct extent_buffer *alloc_tree_block_no_bg_flush(
struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 parent_start,
const struct btrfs_disk_key *disk_key,
int level,
u64 hint,
u64 empty_size)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *ret;
/*
* If we are COWing a node/leaf from the extent, chunk, device or free
* space trees, make sure that we do not finish block group creation of
* pending block groups. We do this to avoid a deadlock.
* COWing can result in allocation of a new chunk, and flushing pending
* block groups (btrfs_create_pending_block_groups()) can be triggered
* when finishing allocation of a new chunk. Creation of a pending block
* group modifies the extent, chunk, device and free space trees,
* therefore we could deadlock with ourselves since we are holding a
* lock on an extent buffer that btrfs_create_pending_block_groups() may
* try to COW later.
* For similar reasons, we also need to delay flushing pending block
* groups when splitting a leaf or node, from one of those trees, since
* we are holding a write lock on it and its parent or when inserting a
* new root node for one of those trees.
*/
if (root == fs_info->extent_root ||
root == fs_info->chunk_root ||
root == fs_info->dev_root ||
root == fs_info->free_space_root)
trans->can_flush_pending_bgs = false;
ret = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, disk_key, level,
hint, empty_size);
trans->can_flush_pending_bgs = true;
return ret;
}
/*
* does the dirty work in cow of a single block. The parent block (if
* supplied) is updated to point to the new cow copy. The new buffer is marked
@ -1050,28 +1092,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
/*
* If we are COWing a node/leaf from the extent, chunk, device or free
* space trees, make sure that we do not finish block group creation of
* pending block groups. We do this to avoid a deadlock.
* COWing can result in allocation of a new chunk, and flushing pending
* block groups (btrfs_create_pending_block_groups()) can be triggered
* when finishing allocation of a new chunk. Creation of a pending block
* group modifies the extent, chunk, device and free space trees,
* therefore we could deadlock with ourselves since we are holding a
* lock on an extent buffer that btrfs_create_pending_block_groups() may
* try to COW later.
*/
if (root == fs_info->extent_root ||
root == fs_info->chunk_root ||
root == fs_info->dev_root ||
root == fs_info->free_space_root)
trans->can_flush_pending_bgs = false;
cow = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, &disk_key, level,
search_start, empty_size);
trans->can_flush_pending_bgs = true;
cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
level, search_start, empty_size);
if (IS_ERR(cow))
return PTR_ERR(cow);
@ -3383,8 +3405,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(lower, &lower_key, 0);
c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&lower_key, level, root->node->start, 0);
c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
root->node->start, 0);
if (IS_ERR(c))
return PTR_ERR(c);
@ -3513,8 +3535,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
mid = (c_nritems + 1) / 2;
btrfs_node_key(c, &disk_key, mid);
split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&disk_key, level, c->start, 0);
split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
c->start, 0);
if (IS_ERR(split))
return PTR_ERR(split);
@ -4298,8 +4320,8 @@ again:
else
btrfs_item_key(l, &disk_key, mid);
right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&disk_key, 0, l->start, 0);
right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
l->start, 0);
if (IS_ERR(right))
return PTR_ERR(right);


@ -1677,6 +1677,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
flags | SB_RDONLY, device_name, data);
if (IS_ERR(mnt_root)) {
root = ERR_CAST(mnt_root);
kfree(subvol_name);
goto out;
}
@ -1686,12 +1687,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (error < 0) {
root = ERR_PTR(error);
mntput(mnt_root);
kfree(subvol_name);
goto out;
}
}
}
if (IS_ERR(mnt_root)) {
root = ERR_CAST(mnt_root);
kfree(subvol_name);
goto out;
}


@@ -50,6 +50,7 @@
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
+#include "dns_resolve.h"
 #include "ntlmssp.h"
 #include "nterr.h"
 #include "rfc1002pdu.h"
@@ -317,6 +318,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
 static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
 					const char *devname, bool is_smb3);
+/*
+ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
+ * get their ip addresses changed at some point.
+ *
+ * This should be called with server->srv_mutex held.
+ */
+#ifdef CONFIG_CIFS_DFS_UPCALL
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+	int rc;
+	int len;
+	char *unc, *ipaddr = NULL;
+
+	if (!server->hostname)
+		return -EINVAL;
+
+	len = strlen(server->hostname) + 3;
+
+	unc = kmalloc(len, GFP_KERNEL);
+	if (!unc) {
+		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+		return -ENOMEM;
+	}
+	snprintf(unc, len, "\\\\%s", server->hostname);
+
+	rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+	kfree(unc);
+
+	if (rc < 0) {
+		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+			 __func__, server->hostname, rc);
+		return rc;
+	}
+
+	rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+				  strlen(ipaddr));
+	kfree(ipaddr);
+
+	return !rc ? -1 : 0;
+}
+#else
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+	return 0;
+}
+#endif
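
For illustration, the UNC string this helper hands to the DNS upcall (the hostname below is a made-up example): strlen() + 3 reserves room for the two leading backslashes plus the NUL terminator, and the C literal "\\\\" encodes exactly two backslash characters.

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *hostname = "srv1";	/* stand-in for server->hostname */
		int len = strlen(hostname) + 3;	/* 4 + 3 = 7: "\\" + name + NUL */
		char unc[32];

		snprintf(unc, (size_t)len, "\\\\%s", hostname);
		printf("%s\n", unc);		/* prints: \\srv1 */
		return 0;
	}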
 /*
  * cifs tcp session reconnection
  *
@@ -417,6 +465,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
 		rc = generic_ip_connect(server);
 		if (rc) {
 			cifs_dbg(FYI, "reconnect error %d\n", rc);
+			rc = reconn_set_ipaddr(server);
+			if (rc) {
+				cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+					 __func__, rc);
+			}
 			mutex_unlock(&server->srv_mutex);
 			msleep(3000);
 		} else {


@@ -3127,8 +3127,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
 		rdata->mr = NULL;
 	}
 #endif
-	if (rdata->result)
+	if (rdata->result && rdata->result != -ENODATA) {
 		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
+		trace_smb3_read_err(0 /* xid */,
+				    rdata->cfile->fid.persistent_fid,
+				    tcon->tid, tcon->ses->Suid, rdata->offset,
+				    rdata->bytes, rdata->result);
+	} else
+		trace_smb3_read_done(0 /* xid */,
+				     rdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid,
+				     rdata->offset, rdata->got_bytes);
 	queue_work(cifsiod_wq, &rdata->work);
 	DeleteMidQEntry(mid);
@@ -3203,13 +3212,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
 	if (rc) {
 		kref_put(&rdata->refcount, cifs_readdata_release);
 		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
-		trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
-				    io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				    io_parms.offset, io_parms.length);
-	} else
-		trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
-				    io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				    io_parms.offset, io_parms.length);
+		trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+				    io_parms.tcon->tid,
+				    io_parms.tcon->ses->Suid,
+				    io_parms.offset, io_parms.length, rc);
+	}
 	cifs_small_buf_release(buf);
 	return rc;
@@ -3253,10 +3260,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 		if (rc != -ENODATA) {
 			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
 			cifs_dbg(VFS, "Send error in read = %d\n", rc);
+			trace_smb3_read_err(xid, req->PersistentFileId,
+					    io_parms->tcon->tid, ses->Suid,
+					    io_parms->offset, io_parms->length,
+					    rc);
 		}
-		trace_smb3_read_err(rc, xid, req->PersistentFileId,
-				    io_parms->tcon->tid, ses->Suid,
-				    io_parms->offset, io_parms->length);
 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 		return rc == -ENODATA ? 0 : rc;
 	} else
@@ -3342,8 +3350,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
 		wdata->mr = NULL;
 	}
 #endif
-	if (wdata->result)
+	if (wdata->result) {
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+		trace_smb3_write_err(0 /* no xid */,
+				     wdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid, wdata->offset,
+				     wdata->bytes, wdata->result);
+	} else
+		trace_smb3_write_done(0 /* no xid */,
+				      wdata->cfile->fid.persistent_fid,
+				      tcon->tid, tcon->ses->Suid,
+				      wdata->offset, wdata->bytes);
 	queue_work(cifsiod_wq, &wdata->work);
 	DeleteMidQEntry(mid);
@@ -3485,10 +3502,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
 				     wdata->bytes, rc);
 		kref_put(&wdata->refcount, release);
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
-	} else
-		trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
-				      tcon->tid, tcon->ses->Suid, wdata->offset,
-				      wdata->bytes);
+	}
 async_writev_out:
 	cifs_small_buf_release(req);
@@ -3714,8 +3728,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
 		    rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
 			srch_inf->endOfSearch = true;
 			rc = 0;
-		}
-		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+		} else
+			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
 		goto qdir_exit;
 	}


@@ -1202,15 +1202,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-	long freed;
-
 	do {
 		LIST_HEAD(dispose);
-		freed = list_lru_walk(&sb->s_dentry_lru,
+		list_lru_walk(&sb->s_dentry_lru,
 			dentry_lru_isolate_shrink, &dispose, 1024);
-		this_cpu_sub(nr_dentry_unused, freed);
 		shrink_dentry_list(&dispose);
 	} while (list_lru_count(&sb->s_dentry_lru) > 0);
 }


@@ -1747,9 +1747,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
 			goto next_iter;
 		}
 		if (ret == -E2BIG) {
-			n += rbm->bii - initial_bii;
 			rbm->bii = 0;
 			rbm->offset = 0;
+			n += (rbm->bii - initial_bii);
 			goto res_covered_end_of_rgrp;
 		}
 		return ret;


@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	nfs_set_page_writeback(page);
 	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
-	ret = 0;
+	ret = req->wb_context->error;
 	/* If there is a fatal error that covers this write, just exit */
-	if (nfs_error_is_fatal_on_server(req->wb_context->error))
+	if (nfs_error_is_fatal_on_server(ret))
 		goto out_launder;
+	ret = 0;
 	if (!nfs_pageio_add_request(pgio, req)) {
 		ret = pgio->pg_error;
 		/*
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 			nfs_context_set_write_error(req->wb_context, ret);
 			if (nfs_error_is_fatal_on_server(ret))
 				goto out_launder;
-		}
+		} else
+			ret = -EAGAIN;
 		nfs_redirty_request(req);
-		ret = -EAGAIN;
 	} else
 		nfs_add_stats(page_file_mapping(page)->host,
 				NFSIOS_WRITEPAGES, 1);


@@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
 	case ARPHRD_IPGRE:
 	case ARPHRD_VOID:
 	case ARPHRD_NONE:
+	case ARPHRD_RAWIP:
 		return false;
 	default:
 		return true;


@@ -1455,6 +1455,7 @@ struct net_device_ops {
  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
@@ -1486,6 +1487,7 @@ enum netdev_priv_flags {
 	IFF_NO_RX_HANDLER		= 1<<26,
 	IFF_FAILOVER			= 1<<27,
 	IFF_FAILOVER_SLAVE		= 1<<28,
+	IFF_L3MDEV_RX_HANDLER		= 1<<29,
 };
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1516,6 +1518,7 @@ enum netdev_priv_flags {
 #define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
 #define IFF_FAILOVER			IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
+#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
 /**
  * struct net_device - The DEVICE structure.
@@ -4465,6 +4468,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
 	return dev->priv_flags & IFF_SUPP_NOFCS;
 }
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
 static inline bool netif_is_l3_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_L3MDEV_MASTER;


@@ -138,11 +138,16 @@ extern struct device_node *of_aliases;
 extern struct device_node *of_stdout;
 extern raw_spinlock_t devtree_lock;
-/* flag descriptions (need to be visible even when !CONFIG_OF) */
-#define OF_DYNAMIC	1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED	2 /* node has been detached from the device tree */
-#define OF_POPULATED	3 /* device already created for the node */
-#define OF_POPULATED_BUS	4 /* of_platform_populate recursed to children of this node */
+/*
+ * struct device_node flag descriptions
+ * (need to be visible even when !CONFIG_OF)
+ */
+#define OF_DYNAMIC		1 /* (and properties) allocated via kmalloc */
+#define OF_DETACHED		2 /* detached from the device tree */
+#define OF_POPULATED		3 /* device already created */
+#define OF_POPULATED_BUS	4 /* platform bus created for children */
+#define OF_OVERLAY		5 /* allocated for an overlay */
+#define OF_OVERLAY_FREE_CSET	6 /* in overlay cset being freed */
 #define OF_BAD_ADDR	((u64)-1)


@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
 #define MMF_OOM_VICTIM		25	/* mm is the oom victim */
+#define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\


@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
 	if (netif_is_l3_slave(skb->dev))
 		master = netdev_master_upper_dev_get_rcu(skb->dev);
-	else if (netif_is_l3_master(skb->dev))
+	else if (netif_is_l3_master(skb->dev) ||
+		 netif_has_l3_rx_handler(skb->dev))
 		master = skb->dev;
 	if (master && master->l3mdev_ops->l3mdev_l3_rcv)


@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
 	return NULL;
 }
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+						struct list_head *dead)
 	__releases(&tasklist_lock)
 	__acquires(&tasklist_lock)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(father);
 	struct task_struct *reaper = pid_ns->child_reaper;
+	struct task_struct *p, *n;
 	if (likely(reaper != father))
 		return reaper;
@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
 		panic("Attempted to kill init! exitcode=0x%08x\n",
 			father->signal->group_exit_code ?: father->exit_code);
 	}
+
+	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+		list_del_init(&p->ptrace_entry);
+		release_task(p);
+	}
+
 	zap_pid_ns_processes(pid_ns);
 	write_lock_irq(&tasklist_lock);
@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
 	exit_ptrace(father, dead);
 	/* Can drop and reacquire tasklist_lock */
-	reaper = find_child_reaper(father);
+	reaper = find_child_reaper(father, dead);
 	if (list_empty(&father->children))
 		return;


@@ -4269,7 +4269,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			break;
 		}
 		if (ret & VM_FAULT_RETRY) {
-			if (nonblocking)
+			if (nonblocking &&
+			    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 				*nonblocking = 0;
 			*nr_pages = 0;
 			/*


@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
 		if (fail || tk->addr_valid == 0) {
 			pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
 			       pfn, tk->tsk->comm, tk->tsk->pid);
-			force_sig(SIGKILL, tk->tsk);
+			do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+					 tk->tsk, PIDTYPE_PID);
 		}
 		/*
/*


@@ -1400,23 +1400,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
-	struct page *page;
 	for (pfn = start; pfn < end; pfn++) {
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageLRU(page))
-				return pfn;
-			if (__PageMovable(page))
-				return pfn;
-			if (PageHuge(page)) {
-				if (hugepage_migration_supported(page_hstate(page)) &&
-				    page_huge_active(page))
-					return pfn;
-				else
-					pfn = round_up(pfn + 1,
-						1 << compound_order(page)) - 1;
-			}
-		}
+		struct page *page, *head;
+		unsigned long skip;
+
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+		if (PageLRU(page))
+			return pfn;
+		if (__PageMovable(page))
+			return pfn;
+
+		if (!PageHuge(page))
+			continue;
+		head = compound_head(page);
+		if (hugepage_migration_supported(page_hstate(head)) &&
+		    page_huge_active(head))
+			return pfn;
+		skip = (1 << compound_order(head)) - (page - head);
+		pfn += skip - 1;
 	}
 	return 0;
 }
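
The skip arithmetic is the heart of this fix: the new code always works from the compound head, because calling compound_order() on an arbitrary tail page gives a meaningless value. A worked example under assumed numbers (a 1 GiB x86-64 gigantic page, order 18):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed values: head pfn of a gigantic page plus a scan
		 * position inside it; 1 << 18 == 262144 pfns per 1 GiB page. */
		unsigned long head = 262144, pfn = 262200;
		unsigned long nr = 1UL << 18;		/* 1 << compound_order(head) */
		unsigned long skip = nr - (pfn - head);	/* pfns left in this page */

		pfn += skip - 1;			/* as in the hunk above */
		printf("next pfn scanned: %lu\n", pfn + 1);	/* 524288 */
		return 0;
	}

After the loop's own pfn++, scanning resumes at the first pfn past the gigantic page, regardless of where inside it the scan started.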


@@ -1118,10 +1118,13 @@ out:
 	 * If migration is successful, decrease refcount of the newpage
 	 * which will not free the page because new page owner increased
 	 * refcounter. As well, if it is LRU page, add the page to LRU
-	 * list in here.
+	 * list in here. Use the old state of the isolated source page to
+	 * determine if we migrated a LRU page. newpage was already unlocked
+	 * and possibly modified by its owner - don't rely on the page
+	 * state.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		if (unlikely(__PageMovable(newpage)))
+		if (unlikely(!is_lru))
 			put_page(newpage);
 		else
 			putback_lru_page(newpage);


@@ -647,8 +647,8 @@ static void wake_oom_reaper(struct task_struct *tsk)
 	 */
 	spin_lock(&oom_reaper_lock);
-	/* tsk is already queued? */
-	if (tsk == oom_reaper_list || tsk->oom_reaper_list) {
+	/* mm is already queued? */
+	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) {
 		spin_unlock(&oom_reaper_lock);
 		return;
 	}
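
The new check relies on test_and_set_bit() being atomic: it sets the bit and returns its previous value, so exactly one caller ever enqueues a given mm even when two tasks sharing it are picked in a row. A self-contained userspace sketch of the same once-only idiom (all names hypothetical):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool queued;	/* stand-in for MMF_OOM_REAP_QUEUED */

	static bool try_enqueue_once(void)
	{
		/* atomic_exchange() returns the previous value, just like
		 * test_and_set_bit(); only the first caller sees false. */
		if (atomic_exchange(&queued, true))
			return false;	/* somebody already queued it */
		/* ... put the item on the work list exactly once ... */
		return true;
	}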
@@ -981,6 +981,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	 * still freeing memory.
 	 */
 	read_lock(&tasklist_lock);
+
+	/*
+	 * The task 'p' might have already exited before reaching here. The
+	 * put_task_struct() will free task_struct 'p' while the loop still try
+	 * to access the field of 'p', so, get an extra reference.
+	 */
+	get_task_struct(p);
 	for_each_thread(p, t) {
 		list_for_each_entry(child, &t->children, sibling) {
 			unsigned int child_points;
@@ -1000,6 +1007,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 			}
 		}
 	}
+	put_task_struct(p);
 	read_unlock(&tasklist_lock);
 	/*


@@ -8629,6 +8629,9 @@ int init_dummy_netdev(struct net_device *dev)
 	set_bit(__LINK_STATE_PRESENT, &dev->state);
 	set_bit(__LINK_STATE_START, &dev->state);
+	/* napi_busy_loop stats accounting wants this */
+	dev_net_set(dev, &init_net);
+
 	/* Note : We dont allocate pcpu_refcnt for dummy devices,
 	 * because users of this 'device' dont need to change
 	 * its refcount.


@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
+#include <net/erspan.h>
 #include <net/icmp.h>
 #include <net/route.h>
@@ -118,6 +119,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 			hdr_len += 4;
 	}
 	tpi->hdr_len = hdr_len;
+
+	/* ERSPAN ver 1 and 2 protocol sets GRE key field
+	 * to 0 and sets the configured key in the
+	 * inner erspan header field
+	 */
+	if (greh->protocol == htons(ETH_P_ERSPAN) ||
+	    greh->protocol == htons(ETH_P_ERSPAN2)) {
+		struct erspan_base_hdr *ershdr;
+
+		if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
+			return -EINVAL;
+
+		ershdr = (struct erspan_base_hdr *)options;
+		tpi->key = cpu_to_be32(get_session_id(ershdr));
+	}
+
 	return hdr_len;
 }
 EXPORT_SYMBOL(gre_parse_header);
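
A small standalone illustration of the mapping above: the 10-bit ERSPAN session ID (0..1023) becomes the big-endian GRE tunnel key used for lookup (the session ID value here is made up):

	#include <stdio.h>
	#include <arpa/inet.h>

	int main(void)
	{
		/* Hypothetical session ID pulled from the ERSPAN base header. */
		unsigned short session_id = 42;
		unsigned int key = htonl(session_id);	/* like cpu_to_be32() */

		printf("tunnel key (network order): 0x%08x\n", key);
		return 0;
	}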


@@ -425,6 +425,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	 * fragment.
 	 */
+	err = -EINVAL;
 	/* Find out where to put this fragment. */
 	prev_tail = qp->q.fragments_tail;
 	if (!prev_tail)
@@ -501,7 +502,6 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 discard_qp:
 	inet_frag_kill(&qp->q);
-	err = -EINVAL;
 	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 err:
 	kfree_skb(skb);


@@ -269,20 +269,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 	int len;
 	itn = net_generic(net, erspan_net_id);
-	len = gre_hdr_len + sizeof(*ershdr);
-
-	/* Check based hdr len */
-	if (unlikely(!pskb_may_pull(skb, len)))
-		return PACKET_REJECT;
 	iph = ip_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 	ver = ershdr->ver;
-
-	/* The original GRE header does not have key field,
-	 * Use ERSPAN 10-bit session ID as key.
-	 */
-	tpi->key = cpu_to_be32(get_session_id(ershdr));
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 				  tpi->flags | TUNNEL_KEY,
 				  iph->saddr, iph->daddr, tpi->key);
@@ -1471,12 +1462,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
+	    !t->collect_md)
+		o_flags |= TUNNEL_KEY;
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||


@@ -377,6 +377,9 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
 				err = -EINVAL;
 				goto out_unlock;
 			}
+		}
+
+		if (sk->sk_bound_dev_if) {
 			dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
 			if (!dev) {
 				err = -ENODEV;


@@ -550,13 +550,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
 	struct ip6_tnl *tunnel;
 	u8 ver;
-	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
-		return PACKET_REJECT;
-
 	ipv6h = ipv6_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)skb->data;
 	ver = ershdr->ver;
-	tpi->key = cpu_to_be32(get_session_id(ershdr));
 	tunnel = ip6gre_tunnel_lookup(skb->dev,
 				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
@@ -2124,12 +2120,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct __ip6_tnl_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
+	    !p->collect_md)
+		o_flags |= TUNNEL_KEY;
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||


@@ -1506,6 +1506,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
 			continue;
 		rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
 		list_del_rcu(&c->list);
+		call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
+					       FIB_EVENT_ENTRY_DEL,
+					       (struct mfc6_cache *)c, mrt->id);
 		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
 		mr_cache_put(c);
 	}
@@ -1514,10 +1517,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
 	spin_lock_bh(&mfc_unres_lock);
 	list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
 		list_del(&c->list);
-		call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
-					       FIB_EVENT_ENTRY_DEL,
-					       (struct mfc6_cache *)c,
-					       mrt->id);
 		mr6_netlink_event(mrt, (struct mfc6_cache *)c,
 				  RTM_DELROUTE);
 		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);


@@ -146,6 +146,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 	} else {
 		ip6_flow_hdr(hdr, 0, flowlabel);
 		hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 	}
 	hdr->nexthdr = NEXTHDR_ROUTING;


@@ -83,8 +83,7 @@
 #define L2TP_SLFLAG_S	   0x40000000
 #define L2TP_SL_SEQ_MASK   0x00ffffff
-#define L2TP_HDR_SIZE_SEQ		10
-#define L2TP_HDR_SIZE_NOSEQ		6
+#define L2TP_HDR_SIZE_MAX		14
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS	0
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
 	__skb_pull(skb, sizeof(struct udphdr));
 	/* Short packet? */
-	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
 		l2tp_info(tunnel, L2TP_MSG_DATA,
 			  "%s: recv short packet (len=%d)\n",
 			  tunnel->name, skb->len);
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
 		goto error;
 	}
+	if (tunnel->version == L2TP_HDR_VER_3 &&
+	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto error;
+
 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
 	l2tp_session_dec_refcount(session);


@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
 }
 #endif
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+					       unsigned char **ptr, unsigned char **optr)
+{
+	int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+	if (opt_len > 0) {
+		int off = *ptr - *optr;
+
+		if (!pskb_may_pull(skb, off + opt_len))
+			return -1;
+
+		if (skb->data != *optr) {
+			*optr = skb->data;
+			*ptr = skb->data + off;
+		}
+	}
+
+	return 0;
+}
+
 #define l2tp_printk(ptr, type, func, fmt, ...)				\
 do {									\
 	if (((ptr)->debug) & (type))					\


@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);


@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);


@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
 {
 	struct nr_sock *nr = nr_sk(sk);
-	mod_timer(&nr->t1timer, jiffies + nr->t1);
+	sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
 }
 void nr_start_t2timer(struct sock *sk)
 {
 	struct nr_sock *nr = nr_sk(sk);
-	mod_timer(&nr->t2timer, jiffies + nr->t2);
+	sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
 }
 void nr_start_t4timer(struct sock *sk)
 {
 	struct nr_sock *nr = nr_sk(sk);
-	mod_timer(&nr->t4timer, jiffies + nr->t4);
+	sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
 }
 void nr_start_idletimer(struct sock *sk)
@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
 	struct nr_sock *nr = nr_sk(sk);
 	if (nr->idle > 0)
-		mod_timer(&nr->idletimer, jiffies + nr->idle);
+		sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
 }
 void nr_start_heartbeat(struct sock *sk)
 {
-	mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+	sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
 }
 void nr_stop_t1timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t1timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t1timer);
 }
 void nr_stop_t2timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t2timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t2timer);
 }
 void nr_stop_t4timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t4timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t4timer);
 }
 void nr_stop_idletimer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->idletimer);
+	sk_stop_timer(sk, &nr_sk(sk)->idletimer);
 }
 void nr_stop_heartbeat(struct sock *sk)
 {
-	del_timer(&sk->sk_timer);
+	sk_stop_timer(sk, &sk->sk_timer);
 }
 int nr_t1timer_running(struct sock *sk)
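
The switch matters because mod_timer()/del_timer() do not pin the socket, so a timer can fire after the last socket reference is gone. The sock wrappers add refcounting around the same calls; paraphrased from net/core/sock.c (check the exact tree you are on):

	void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			    unsigned long expires)
	{
		/* mod_timer() returns 0 when the timer was not already
		 * pending; only then take an extra socket reference. */
		if (!mod_timer(timer, expires))
			sock_hold(sk);
	}

	void sk_stop_timer(struct sock *sk, struct timer_list *timer)
	{
		/* del_timer() returns nonzero only if it deactivated a
		 * pending timer; drop the reference taken when it was armed. */
		if (del_timer(timer))
			__sock_put(sk);
	}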


@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
 /*
  *	Route a frame to an appropriate AX.25 connection.
+ *	A NULL ax25_cb indicates an internally generated frame.
  */
 int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 {
@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	if (skb->len < ROSE_MIN_LEN)
 		return res;
+
+	if (!ax25)
+		return rose_loopback_queue(skb, NULL);
+
 	frametype = skb->data[2];
 	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
 	if (frametype == ROSE_CALL_REQUEST &&


@@ -277,7 +277,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	if (saddr) {
 		fl6->saddr = saddr->v6.sin6_addr;
-		fl6->fl6_sport = saddr->v6.sin6_port;
+		if (!fl6->fl6_sport)
+			fl6->fl6_sport = saddr->v6.sin6_port;
 		pr_debug("src=%pI6 - ", &fl6->saddr);
 	}


@@ -440,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	}
 	if (saddr) {
 		fl4->saddr = saddr->v4.sin_addr.s_addr;
-		fl4->fl4_sport = saddr->v4.sin_port;
+		if (!fl4->fl4_sport)
+			fl4->fl4_sport = saddr->v4.sin_port;
 	}
 	pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,


@@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	 *
 	 * [INIT ACK back to where the INIT came from.]
 	 */
-	retval->transport = chunk->transport;
+	if (chunk->transport)
+		retval->transport =
+			sctp_assoc_lookup_paddr(asoc,
+						&chunk->transport->ipaddr);
 	retval->subh.init_hdr =
 		sctp_addto_chunk(retval, sizeof(initack), &initack);
@@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
 	 *
 	 * [COOKIE ACK back to where the COOKIE ECHO came from.]
 	 */
-	if (retval && chunk)
-		retval->transport = chunk->transport;
+	if (retval && chunk && chunk->transport)
+		retval->transport =
+			sctp_assoc_lookup_paddr(asoc,
+						&chunk->transport->ipaddr);
 	return retval;
 }


@@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 	struct sctp_strreset_outreq *outreq = param.v;
 	struct sctp_stream *stream = &asoc->stream;
 	__u32 result = SCTP_STRRESET_DENIED;
-	__u16 i, nums, flags = 0;
 	__be16 *str_p = NULL;
 	__u32 request_seq;
+	__u16 i, nums;
 	request_seq = ntohl(outreq->request_seq);
@@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
 		goto out;
+	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
+	str_p = outreq->list_of_streams;
+	for (i = 0; i < nums; i++) {
+		if (ntohs(str_p[i]) >= stream->incnt) {
+			result = SCTP_STRRESET_ERR_WRONG_SSN;
+			goto out;
+		}
+	}
+
 	if (asoc->strreset_chunk) {
 		if (!sctp_chunk_lookup_strreset_param(
 				asoc, outreq->response_seq,
@@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 			sctp_chunk_put(asoc->strreset_chunk);
 			asoc->strreset_chunk = NULL;
 		}
-
-		flags = SCTP_STREAM_RESET_INCOMING_SSN;
 	}
-	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
-	if (nums) {
-		str_p = outreq->list_of_streams;
-		for (i = 0; i < nums; i++) {
-			if (ntohs(str_p[i]) >= stream->incnt) {
-				result = SCTP_STRRESET_ERR_WRONG_SSN;
-				goto out;
-			}
-		}
-
+	if (nums)
 		for (i = 0; i < nums; i++)
 			SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
-	} else {
+	else
 		for (i = 0; i < stream->incnt; i++)
 			SCTP_SI(stream, i)->mid = 0;
-	}
 	result = SCTP_STRRESET_PERFORMED;
 	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
-		flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
-		GFP_ATOMIC);
+		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
 out:
 	sctp_update_strreset_result(asoc, result);
@@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
 	result = SCTP_STRRESET_PERFORMED;
-	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
-		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
 out:
 	sctp_update_strreset_result(asoc, result);
 err:
@@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
 	if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
 		goto out;
+	in = ntohs(addstrm->number_of_streams);
+	incnt = stream->incnt + in;
+	if (!in || incnt > SCTP_MAX_STREAM)
+		goto out;
+
+	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
+		goto out;
+
 	if (asoc->strreset_chunk) {
 		if (!sctp_chunk_lookup_strreset_param(
 				asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
 		}
 	}
-	in = ntohs(addstrm->number_of_streams);
-	incnt = stream->incnt + in;
-	if (!in || incnt > SCTP_MAX_STREAM)
-		goto out;
-
-	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
-		goto out;
-
 	stream->incnt = incnt;
 	result = SCTP_STRRESET_PERFORMED;
@@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
 	result = SCTP_STRRESET_PERFORMED;
-	*evp = sctp_ulpevent_make_stream_change_event(asoc,
-		0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
 out:
 	sctp_update_strreset_result(asoc, result);
 err:
@@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
 				sout->mid_uo = 0;
 			}
 		}
-
-		flags = SCTP_STREAM_RESET_OUTGOING_SSN;
 	}
+	flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
+
 	for (i = 0; i < stream->outcnt; i++)
 		SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
@@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
 		       sizeof(__u16);
+		flags |= SCTP_STREAM_RESET_INCOMING_SSN;
+
 		*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
 			nums, str_p, GFP_ATOMIC);
 	} else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {


@@ -1628,7 +1628,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		dst_copy_metrics(dst1, dst);
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
-			__u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
+			__u32 mark = 0;
+
+			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
+				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
 			family = xfrm[i]->props.family;
 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,


@@ -117,6 +117,7 @@ struct alc_spec {
 	int codec_variant;	/* flag for other variants */
 	unsigned int has_alc5505_dsp:1;
 	unsigned int no_depop_delay:1;
+	unsigned int done_hp_init:1;
 	/* for PLL fix */
 	hda_nid_t pll_nid;
@@ -3372,6 +3373,48 @@ static void alc_default_shutup(struct hda_codec *codec)
 	snd_hda_shutup_pins(codec);
 }
+static void alc294_hp_init(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	int i, val;
+
+	if (!hp_pin)
+		return;
+
+	snd_hda_codec_write(codec, hp_pin, 0,
+			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+	msleep(100);
+
+	snd_hda_codec_write(codec, hp_pin, 0,
+			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+	/* Wait for depop procedure finish */
+	val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	for (i = 0; i < 20 && val & 0x0080; i++) {
+		msleep(50);
+		val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	}
+	/* Set HP depop to auto mode */
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+	msleep(50);
+}
+
+static void alc294_init(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+
+	if (!spec->done_hp_init) {
+		alc294_hp_init(codec);
+		spec->done_hp_init = true;
+	}
+	alc_default_init(codec);
+}
+
 static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
 			     unsigned int val)
 {
@@ -7288,37 +7331,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
 	alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
-static void alc294_hp_init(struct hda_codec *codec)
-{
-	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
-	int i, val;
-
-	if (!hp_pin)
-		return;
-
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
-
-	msleep(100);
-
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
-	alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
-	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
-
-	/* Wait for depop procedure finish */
-	val = alc_read_coefex_idx(codec, 0x58, 0x01);
-	for (i = 0; i < 20 && val & 0x0080; i++) {
-		msleep(50);
-		val = alc_read_coefex_idx(codec, 0x58, 0x01);
-	}
-	/* Set HP depop to auto mode */
-	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
-	msleep(50);
-}
-
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7444,7 +7456,7 @@ static int patch_alc269(struct hda_codec *codec)
 		spec->codec_variant = ALC269_TYPE_ALC294;
 		spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
-		alc294_hp_init(codec);
+		spec->init_hook = alc294_init;
 		break;
 	case 0x10ec0300:
 		spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7456,7 +7468,7 @@ static int patch_alc269(struct hda_codec *codec)
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
-		alc294_hp_init(codec);
+		spec->init_hook = alc294_init;
 		break;
 	}


@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
+	case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
 	case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
 	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
 	case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */


@@ -1563,7 +1563,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 # define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(-1, action)
 #else
-# define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(val, action)
+# define EXPECT_SYSCALL_RETURN(val, action)		\
+	do {						\
+		errno = 0;				\
+		if (val < 0) {				\
+			EXPECT_EQ(-1, action);		\
+			EXPECT_EQ(-(val), errno);	\
+		} else {				\
+			EXPECT_EQ(val, action);		\
+		}					\
+	} while (0)
 #endif
 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1602,7 +1611,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
 /* Architecture-specific syscall changing routine. */
 void change_syscall(struct __test_metadata *_metadata,
-		    pid_t tracee, int syscall)
+		    pid_t tracee, int syscall, int result)
 {
 	int ret;
 	ARCH_REGS regs;
@@ -1661,7 +1670,7 @@ void change_syscall(struct __test_metadata *_metadata,
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 		TH_LOG("Can't modify syscall return on this architecture");
 #else
-		regs.SYSCALL_RET = EPERM;
+		regs.SYSCALL_RET = result;
 #endif
 #ifdef HAVE_GETREGS
@@ -1689,14 +1698,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
 	case 0x1002:
 		/* change getpid to getppid. */
 		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
-		change_syscall(_metadata, tracee, __NR_getppid);
+		change_syscall(_metadata, tracee, __NR_getppid, 0);
 		break;
 	case 0x1003:
-		/* skip gettid. */
+		/* skip gettid with valid return code. */
 		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
-		change_syscall(_metadata, tracee, -1);
+		change_syscall(_metadata, tracee, -1, 45000);
 		break;
 	case 0x1004:
+		/* skip openat with error. */
+		EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
+		change_syscall(_metadata, tracee, -1, -ESRCH);
+		break;
+	case 0x1005:
 		/* do nothing (allow getppid) */
 		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
 		break;
@@ -1729,9 +1743,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
 	nr = get_syscall(_metadata, tracee);
 	if (nr == __NR_getpid)
-		change_syscall(_metadata, tracee, __NR_getppid);
+		change_syscall(_metadata, tracee, __NR_getppid, 0);
+
+	if (nr == __NR_gettid)
+		change_syscall(_metadata, tracee, -1, 45000);
 	if (nr == __NR_openat)
-		change_syscall(_metadata, tracee, -1);
+		change_syscall(_metadata, tracee, -1, -ESRCH);
 }
 FIXTURE_DATA(TRACE_syscall) {
@@ -1748,8 +1764,10 @@ FIXTURE_SETUP(TRACE_syscall)
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
 		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
-		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
 	};
@@ -1797,15 +1815,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
 	EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
+TEST_F(TRACE_syscall, ptrace_syscall_errno)
 {
 	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
 	teardown_trace_fixture(_metadata, self->tracer);
 	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
 					   true);
-	/* Tracer should skip the open syscall, resulting in EPERM. */
-	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
+	/* Tracer should skip the open syscall, resulting in ESRCH. */
+	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, ptrace_syscall_faked)
+{
+	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+	teardown_trace_fixture(_metadata, self->tracer);
+	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+					   true);
+
+	/* Tracer should skip the gettid syscall, resulting fake pid. */
+	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 TEST_F(TRACE_syscall, syscall_allowed)
@@ -1838,7 +1867,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
 	EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
-TEST_F(TRACE_syscall, syscall_dropped)
+TEST_F(TRACE_syscall, syscall_errno)
+{
+	long ret;
+
+	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+	ASSERT_EQ(0, ret);
+
+	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
+	ASSERT_EQ(0, ret);
+
+	/* openat has been skipped and an errno return. */
+	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, syscall_faked)
 {
 	long ret;
@@ -1849,8 +1892,7 @@ TEST_F(TRACE_syscall, syscall_faked)
 	ASSERT_EQ(0, ret);
 	/* gettid has been skipped and an altered return value stored. */
-	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
-	EXPECT_NE(self->mytid, syscall(__NR_gettid));
+	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 TEST_F(TRACE_syscall, skip_after_RET_TRACE)