This is the 4.19.251 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmLG/TsACgkQONu9yGCS
 aT7mDQ/8Cd+wSYLKWd1obamA0rlRL3P3XJSu41c6BCFdqF5GwfBHpl2G/ekowk3A
 RxC/O2IKYWybefcGb1GUaUh1ua4b9zMtcCD+oEFKt5NWjvu1yBFhBEdgHdM6jKsh
 sqToAM40BlnB3flSdT+bYhEE4cRO1BVJDBqx2qLQNE1VtkDxntQe3tFEC2HCUrA4
 QDfAYNu10WOsL82gG65qYGrF1W9G4WVbjxtCFH3msUIKnm8ecKa3V44J+IUKWdvk
 UKl4wHsQJ+AAXThluiGkkfLOzvO+zdf0fep5CDGShGutng0pFptnaWendvwVjeRC
 9EbhphatoReSxUQEO22XKPOiL7gBuga3/gvzcl+GURS+87FZa6k2Cl8ZoRSA2FUh
 mP30hCGjhOV8YmpLhzNhDH4I6946/imUHMW7B0r11Rt5oMfeSiDpZFTfGuXBKcw/
 cfUZKkOpRtPuMOQ33Qj7F7h9Cux0xpzVlEhbj2oH1JseFRib9HC7LpkHwlqlgSK+
 UHORGp6NxP2Qa1hx1p5839vxRECOJYW2G9aq6FK9+We/IxUt4TvlGfYCT5iy61cv
 XKzjH0xkllmUeQ5kYnDzlrWUFM//2G01uqtctKWfmywXvhHkgBSgfujzL4Q6TlWI
 u3/zfSozNCIkG/bWDiHc2nlxaUffnuJMBOjoMxgZDPZzP4CVV6s=
 =/wWN
 -----END PGP SIGNATURE-----

Merge 4.19.251 into android-4.19-stable

Changes in 4.19.251
	nvdimm: Fix badblocks clear off-by-one error
	dm raid: fix accesses beyond end of raid member array
	dm raid: fix KASAN warning in raid5_add_disks
	s390/archrandom: simplify back to earlier design and initialize earlier
	SUNRPC: Fix READ_PLUS crasher
	net: rose: fix UAF bugs caused by timer handler
	net: usb: ax88179_178a: Fix packet receiving
	virtio-net: fix race between ndo_open() and virtio_device_ready()
	selftests/net: pass ipv6_args to udpgso_bench's IPv6 TCP test
	net: tun: unlink NAPI from device on destruction
	net: tun: stop NAPI when detaching queues
	RDMA/qedr: Fix reporting QP timeout attribute
	usbnet: fix memory allocation in helpers
	net: ipv6: unexport __init-annotated seg6_hmac_net_init()
	caif_virtio: fix race between virtio_device_ready() and ndo_open()
	netfilter: nft_dynset: restore set element counter when failing to update
	net: bonding: fix possible NULL deref in rlb code
	net: bonding: fix use-after-free after 802.3ad slave unbind
	nfc: nfcmrvl: Fix irq_of_parse_and_map() return value
	NFC: nxp-nci: Don't issue a zero length i2c_master_read()
	net: tun: avoid disabling NAPI twice
	xen/gntdev: Avoid blocking in unmap_grant_pages()
	hwmon: (ibmaem) don't call platform_device_del() if platform_device_add() fails
	net: dsa: bcm_sf2: force pause link settings
	sit: use min
	ipv6/sit: fix ipip6_tunnel_get_prl return value
	xen/blkfront: fix leaking data in shared pages
	xen/netfront: fix leaking data in shared pages
	xen/netfront: force data bouncing when backend is untrusted
	xen/blkfront: force data bouncing when backend is untrusted
	xen/arm: Fix race in RB-tree based P2M accounting
	net: usb: qmi_wwan: add Telit 0x1060 composition
	net: usb: qmi_wwan: add Telit 0x1070 composition
	Linux 4.19.251

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie9952c1de1287952d707e7b5aafef9f4d39cdd2d
Greg Kroah-Hartman, 2022-07-08 12:28:11 +02:00
commit 105613d74a
33 changed files with 413 additions and 282 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 250
SUBLEVEL = 251
EXTRAVERSION =
NAME = "People's Front"


@ -61,11 +61,12 @@ out:
unsigned long __pfn_to_mfn(unsigned long pfn)
{
struct rb_node *n = phys_to_mach.rb_node;
struct rb_node *n;
struct xen_p2m_entry *entry;
unsigned long irqflags;
read_lock_irqsave(&p2m_lock, irqflags);
n = phys_to_mach.rb_node;
while (n) {
entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (entry->pfn <= pfn &&
@ -151,10 +152,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
int rc;
unsigned long irqflags;
struct xen_p2m_entry *p2m_entry;
struct rb_node *n = phys_to_mach.rb_node;
struct rb_node *n;
if (mfn == INVALID_P2M_ENTRY) {
write_lock_irqsave(&p2m_lock, irqflags);
n = phys_to_mach.rb_node;
while (n) {
p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (p2m_entry->pfn <= pfn &&


@ -2,126 +2,17 @@
/*
* s390 arch random implementation.
*
* Copyright IBM Corp. 2017, 2018
* Copyright IBM Corp. 2017, 2020
* Author(s): Harald Freudenberger
*
* The s390_arch_random_generate() function may be called from random.c
* in interrupt context. So this implementation does the best to be very
* fast. There is a buffer of random data which is asynchronously checked
* and filled by a workqueue thread.
* If there are enough bytes in the buffer the s390_arch_random_generate()
* just delivers these bytes. Otherwise false is returned until the
* worker thread refills the buffer.
* The worker fills the rng buffer by pulling fresh entropy from the
* high quality (but slow) true hardware random generator. This entropy
* is then spread over the buffer with an pseudo random generator PRNG.
* As the arch_get_random_seed_long() fetches 8 bytes and the calling
* function add_interrupt_randomness() counts this as 1 bit entropy the
* distribution needs to make sure there is in fact 1 bit entropy contained
* in 8 bytes of the buffer. The current values pull 32 byte entropy
* and scatter this into a 2048 byte buffer. So 8 byte in the buffer
* will contain 1 bit of entropy.
* The worker thread is rescheduled based on the charge level of the
* buffer but at least with 500 ms delay to avoid too much CPU consumption.
* So the max. amount of rng data delivered via arch_get_random_seed is
* limited to 4k bytes per second.
*/
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <asm/cpacf.h>
DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
EXPORT_SYMBOL(s390_arch_random_counter);
#define ARCH_REFILL_TICKS (HZ/2)
#define ARCH_PRNG_SEED_SIZE 32
#define ARCH_RNG_BUF_SIZE 2048
static DEFINE_SPINLOCK(arch_rng_lock);
static u8 *arch_rng_buf;
static unsigned int arch_rng_buf_idx;
static void arch_rng_refill_buffer(struct work_struct *);
static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
/* max hunk is ARCH_RNG_BUF_SIZE */
if (nbytes > ARCH_RNG_BUF_SIZE)
return false;
/* lock rng buffer */
if (!spin_trylock(&arch_rng_lock))
return false;
/* try to resolve the requested amount of bytes from the buffer */
arch_rng_buf_idx -= nbytes;
if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
atomic64_add(nbytes, &s390_arch_random_counter);
spin_unlock(&arch_rng_lock);
return true;
}
/* not enough bytes in rng buffer, refill is done asynchronously */
spin_unlock(&arch_rng_lock);
return false;
}
EXPORT_SYMBOL(s390_arch_random_generate);
static void arch_rng_refill_buffer(struct work_struct *unused)
{
unsigned int delay = ARCH_REFILL_TICKS;
spin_lock(&arch_rng_lock);
if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
/* buffer is exhausted and needs refill */
u8 seed[ARCH_PRNG_SEED_SIZE];
u8 prng_wa[240];
/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
cpacf_trng(NULL, 0, seed, sizeof(seed));
/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
memset(prng_wa, 0, sizeof(prng_wa));
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
&prng_wa, NULL, 0, seed, sizeof(seed));
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
&prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
}
delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
spin_unlock(&arch_rng_lock);
/* kick next check */
queue_delayed_work(system_long_wq, &arch_rng_work, delay);
}
static int __init s390_arch_random_init(void)
{
/* all the needed PRNO subfunctions available ? */
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
/* alloc arch random working buffer */
arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
if (!arch_rng_buf)
return -ENOMEM;
/* kick worker queue job to fill the random buffer */
queue_delayed_work(system_long_wq,
&arch_rng_work, ARCH_REFILL_TICKS);
/* enable arch random to the outside world */
static_branch_enable(&s390_arch_random_available);
}
return 0;
}
arch_initcall(s390_arch_random_init);


@ -2,7 +2,7 @@
/*
* Kernel interface for the s390 arch_random_* functions
*
* Copyright IBM Corp. 2017
* Copyright IBM Corp. 2017, 2020
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
@ -15,34 +15,37 @@
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <asm/cpacf.h>
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
extern atomic64_t s390_arch_random_counter;
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
static inline bool arch_get_random_long(unsigned long *v)
static inline bool __must_check arch_get_random_long(unsigned long *v)
{
return false;
}
static inline bool arch_get_random_int(unsigned int *v)
static inline bool __must_check arch_get_random_int(unsigned int *v)
{
return false;
}
static inline bool arch_get_random_seed_long(unsigned long *v)
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
if (static_branch_likely(&s390_arch_random_available)) {
return s390_arch_random_generate((u8 *)v, sizeof(*v));
cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
atomic64_add(sizeof(*v), &s390_arch_random_counter);
return true;
}
return false;
}
static inline bool arch_get_random_seed_int(unsigned int *v)
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
if (static_branch_likely(&s390_arch_random_available)) {
return s390_arch_random_generate((u8 *)v, sizeof(*v));
cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
atomic64_add(sizeof(*v), &s390_arch_random_counter);
return true;
}
return false;
}


@ -851,6 +851,11 @@ static void __init setup_randomness(void)
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
#ifdef CONFIG_ARCH_RANDOM
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
static_branch_enable(&s390_arch_random_available);
#endif
}
/*


@ -151,6 +151,10 @@ static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
static bool __read_mostly xen_blkif_trusted = true;
module_param_named(trusted, xen_blkif_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");
#define BLK_RING_SIZE(info) \
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
@ -211,6 +215,7 @@ struct blkfront_info
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
/* Number of 4KB segments handled */
@ -300,8 +305,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
if (!gnt_list_entry)
goto out_of_memory;
if (info->feature_persistent) {
granted_page = alloc_page(GFP_NOIO);
if (info->bounce) {
granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!granted_page) {
kfree(gnt_list_entry);
goto out_of_memory;
@ -320,7 +325,7 @@ out_of_memory:
list_for_each_entry_safe(gnt_list_entry, n,
&rinfo->grants, node) {
list_del(&gnt_list_entry->node);
if (info->feature_persistent)
if (info->bounce)
__free_page(gnt_list_entry->page);
kfree(gnt_list_entry);
i--;
@ -366,7 +371,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
if (info->feature_persistent)
if (info->bounce)
grant_foreign_access(gnt_list_entry, info);
else {
/* Grant access to the GFN passed by the caller */
@ -390,7 +395,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
if (!info->feature_persistent) {
if (!info->bounce) {
struct page *indirect_page;
/* Fetch a pre-allocated page to use for indirect grefs */
@ -705,7 +710,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
.grant_idx = 0,
.segments = NULL,
.rinfo = rinfo,
.need_copy = rq_data_dir(req) && info->feature_persistent,
.need_copy = rq_data_dir(req) && info->bounce,
};
/*
@ -1026,11 +1031,12 @@ static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s\n",
pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
info->max_indirect_segments ? "enabled;" : "disabled;");
info->max_indirect_segments ? "enabled;" : "disabled;",
"bounce buffer:", info->bounce ? "enabled" : "disabled;");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@ -1265,7 +1271,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
if (!list_empty(&rinfo->indirect_pages)) {
struct page *indirect_page, *n;
BUG_ON(info->feature_persistent);
BUG_ON(info->bounce);
list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
list_del(&indirect_page->lru);
__free_page(indirect_page);
@ -1282,7 +1288,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
0, 0UL);
rinfo->persistent_gnts_c--;
}
if (info->feature_persistent)
if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
@ -1303,7 +1309,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
for (j = 0; j < segs; j++) {
persistent_gnt = rinfo->shadow[i].grants_used[j];
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
if (info->feature_persistent)
if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
@ -1493,7 +1499,7 @@ static int blkif_completion(unsigned long *id,
data.s = s;
num_sg = s->num_sg;
if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
if (bret->operation == BLKIF_OP_READ && info->bounce) {
for_each_sg(s->sg, sg, num_sg, i) {
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@ -1552,7 +1558,7 @@ static int blkif_completion(unsigned long *id,
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
if (!info->feature_persistent) {
if (!info->bounce) {
indirect_page = s->indirect_grants[i]->page;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
}
@ -1744,7 +1750,7 @@ static int setup_blkring(struct xenbus_device *dev,
for (i = 0; i < info->nr_ring_pages; i++)
rinfo->ring_ref[i] = GRANT_INVALID_REF;
sring = alloc_pages_exact(ring_size, GFP_NOIO);
sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
if (!sring) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
return -ENOMEM;
@ -1847,6 +1853,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
if (!info)
return -ENODEV;
/* Check if backend is trusted. */
info->bounce = !xen_blkif_trusted ||
!xenbus_read_unsigned(dev->nodename, "trusted", 1);
max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
"max-ring-page-order", 0);
ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@ -2273,17 +2283,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
if (err)
goto out_of_memory;
if (!info->feature_persistent && info->max_indirect_segments) {
if (!info->bounce && info->max_indirect_segments) {
/*
* We are using indirect descriptors but not persistent
* grants, we need to allocate a set of pages that can be
* We are using indirect descriptors but don't have a bounce
* buffer, we need to allocate a set of pages that can be
* used for mapping indirect grefs
*/
int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
struct page *indirect_page = alloc_page(GFP_KERNEL);
struct page *indirect_page = alloc_page(GFP_KERNEL |
__GFP_ZERO);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
@ -2375,6 +2386,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
if (info->feature_persistent)
info->bounce = true;
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
@ -2750,6 +2763,13 @@ static void blkfront_delay_work(struct work_struct *work)
struct blkfront_info *info;
bool need_schedule_work = false;
/*
* Note that when using bounce buffers but not persistent grants
* there's no need to run blkfront_delay_work because grants are
* revoked in blkif_completion or else an error is reported and the
* connection is closed.
*/
mutex_lock(&blkfront_mutex);
list_for_each_entry(info, &info_list, info_list) {


@ -563,7 +563,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
res = platform_device_add(data->pdev);
if (res)
goto ipmi_err;
goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@ -611,7 +611,9 @@ hwmon_reg_err:
ipmi_destroy_user(data->ipmi.user);
ipmi_err:
platform_set_drvdata(data->pdev, NULL);
platform_device_unregister(data->pdev);
platform_device_del(data->pdev);
dev_add_err:
platform_device_put(data->pdev);
dev_err:
ida_simple_remove(&aem_ida, data->id);
id_err:
@ -703,7 +705,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
res = platform_device_add(data->pdev);
if (res)
goto ipmi_err;
goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@ -751,7 +753,9 @@ hwmon_reg_err:
ipmi_destroy_user(data->ipmi.user);
ipmi_err:
platform_set_drvdata(data->pdev, NULL);
platform_device_unregister(data->pdev);
platform_device_del(data->pdev);
dev_add_err:
platform_device_put(data->pdev);
dev_err:
ida_simple_remove(&aem_ida, data->id);
id_err:


@ -407,6 +407,7 @@ struct qedr_qp {
u32 sq_psn;
u32 qkey;
u32 dest_qp_num;
u8 timeout;
/* Relevant to qps created from kernel space only (ULPs) */
u8 prev_wqe_size;


@ -2376,6 +2376,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1 << max_t(int, attr->timeout - 8, 0);
else
qp_params.ack_timeout = 0;
qp->timeout = attr->timeout;
}
if (attr_mask & IB_QP_RETRY_CNT) {
@ -2535,7 +2537,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
rdma_ah_set_sl(&qp_attr->ah_attr, 0);
qp_attr->timeout = params.timeout;
qp_attr->timeout = qp->timeout;
qp_attr->rnr_retry = params.rnr_retry;
qp_attr->retry_cnt = params.retry_cnt;
qp_attr->min_rnr_timer = params.min_rnr_nak_timer;


@ -998,12 +998,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned int i, rebuild_cnt = 0;
unsigned int rebuilds_per_group = 0, copies;
unsigned int rebuilds_per_group = 0, copies, raid_disks;
unsigned int group_size, last_group_start;
for (i = 0; i < rs->md.raid_disks; i++)
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
!rs->dev[i].rdev.sb_page)
for (i = 0; i < rs->raid_disks; i++)
if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
!rs->dev[i].rdev.sb_page)))
rebuild_cnt++;
switch (rs->md.level) {
@ -1043,8 +1044,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
* A A B B C
* C D D E E
*/
raid_disks = min(rs->raid_disks, rs->md.raid_disks);
if (__is_raid10_near(rs->md.new_layout)) {
for (i = 0; i < rs->md.raid_disks; i++) {
for (i = 0; i < raid_disks; i++) {
if (!(i % copies))
rebuilds_per_group = 0;
if ((!rs->dev[i].rdev.sb_page ||
@ -1067,10 +1069,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
* results in the need to treat the last (potentially larger)
* set differently.
*/
group_size = (rs->md.raid_disks / copies);
last_group_start = (rs->md.raid_disks / group_size) - 1;
group_size = (raid_disks / copies);
last_group_start = (raid_disks / group_size) - 1;
last_group_start *= group_size;
for (i = 0; i < rs->md.raid_disks; i++) {
for (i = 0; i < raid_disks; i++) {
if (!(i % copies) && !(i > last_group_start))
rebuilds_per_group = 0;
if ((!rs->dev[i].rdev.sb_page ||
@ -1585,7 +1587,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
{
int i;
for (i = 0; i < rs->md.raid_disks; i++) {
for (i = 0; i < rs->raid_disks; i++) {
struct md_rdev *rdev = &rs->dev[i].rdev;
if (!test_bit(Journal, &rdev->flags) &&
@ -3751,13 +3753,13 @@ static int raid_iterate_devices(struct dm_target *ti,
unsigned int i;
int r = 0;
for (i = 0; !r && i < rs->md.raid_disks; i++)
if (rs->dev[i].data_dev)
r = fn(ti,
rs->dev[i].data_dev,
0, /* No offset on data devs */
rs->md.dev_sectors,
data);
for (i = 0; !r && i < rs->raid_disks; i++) {
if (rs->dev[i].data_dev) {
r = fn(ti, rs->dev[i].data_dev,
0, /* No offset on data devs */
rs->md.dev_sectors, data);
}
}
return r;
}


@ -7716,6 +7716,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
rdev->saved_raid_disk <= last &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
first = rdev->saved_raid_disk;


@ -2199,7 +2199,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (temp_aggregator->num_of_ports == 0)
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
netdev_info(bond->dev, "Removing an active aggregator\n");
/* select new active aggregator */


@ -1292,12 +1292,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
return res;
if (rlb_enabled) {
bond->alb_info.rlb_enabled = 1;
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}


@ -727,13 +727,21 @@ static int cfv_probe(struct virtio_device *vdev)
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
/* register Netdev */
err = register_netdev(netdev);
err = register_netdevice(netdev);
if (err) {
rtnl_unlock();
dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
goto err;
}
virtio_device_ready(vdev);
rtnl_unlock();
debugfs_init(cfv);
return 0;


@ -599,6 +599,11 @@ force_link:
reg |= LINK_STS;
if (state->duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
if (state->pause & MLO_PAUSE_TXRX_MASK) {
if (state->pause & MLO_PAUSE_TX)
reg |= TXFLOW_CNTL;
reg |= RXFLOW_CNTL;
}
core_writel(priv, reg, offset);
}


@ -333,6 +333,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
}
}
static void tun_napi_enable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
napi_enable(&tfile->napi);
}
static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
@ -704,7 +710,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
tun_napi_disable(tfile);
if (!tfile->detached)
tun_napi_disable(tfile);
tun_napi_del(tfile);
}
@ -723,8 +730,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
if (clean) {
RCU_INIT_POINTER(tfile->tun, NULL);
sock_put(&tfile->sk);
} else
} else {
tun_disable_queue(tun, tfile);
tun_napi_disable(tfile);
}
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
@ -797,6 +806,7 @@ static void tun_detach_all(struct net_device *dev)
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_napi_del(tfile);
tun_enable_queue(tfile);
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@ -877,6 +887,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (tfile->detached) {
tun_enable_queue(tfile);
tun_napi_enable(tfile);
} else {
sock_hold(&tfile->sk);
tun_napi_init(tun, tfile, napi, napi_frags);


@ -1377,6 +1377,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* are bundled into this buffer and where we can find an array of
* per-packet metadata (which contains elements encoded into u16).
*/
/* SKB contents for current firmware:
* <packet 1> <padding>
* ...
* <packet N> <padding>
* <per-packet metadata entry 1> <dummy header>
* ...
* <per-packet metadata entry N> <dummy header>
* <padding2> <rx_hdr>
*
* where:
* <packet N> contains pkt_len bytes:
* 2 bytes of IP alignment pseudo header
* packet received
* <per-packet metadata entry N> contains 4 bytes:
* pkt_len and fields AX_RXHDR_*
* <padding> 0-7 bytes to terminate at
* 8 bytes boundary (64-bit).
* <padding2> 4 bytes to make rx_hdr terminate at
* 8 bytes boundary (64-bit)
* <dummy-header> contains 4 bytes:
* pkt_len=0 and AX_RXHDR_DROP_ERR
* <rx-hdr> contains 4 bytes:
* pkt_cnt and hdr_off (offset of
* <per-packet metadata entry 1>)
*
* pkt_cnt is number of entrys in the per-packet metadata.
* In current firmware there is 2 entrys per packet.
* The first points to the packet and the
* second is a dummy header.
* This was done probably to align fields in 64-bit and
* maintain compatibility with old firmware.
* This code assumes that <dummy header> and <padding2> are
* optional.
*/
if (skb->len < 4)
return 0;
skb_trim(skb, skb->len - 4);
@ -1391,51 +1427,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Make sure that the bounds of the metadata array are inside the SKB
* (and in front of the counter at the end).
*/
if (pkt_cnt * 2 + hdr_off > skb->len)
if (pkt_cnt * 4 + hdr_off > skb->len)
return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
/* Packets must not overlap the metadata array */
skb_trim(skb, hdr_off);
for (; ; pkt_cnt--, pkt_hdr++) {
for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
u16 pkt_len_plus_padd;
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
if (pkt_len > skb->len)
/* Skip dummy header used for alignment
*/
if (pkt_len == 0)
continue;
if (pkt_len_plus_padd > skb->len)
return 0;
/* Check CRC or runt packet */
if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
pkt_len >= 2 + ETH_HLEN) {
bool last = (pkt_cnt == 0);
if (last) {
ax_skb = skb;
} else {
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (!ax_skb)
return 0;
}
ax_skb->len = pkt_len;
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
if (last)
return 1;
usbnet_skb_return(dev, ax_skb);
if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
pkt_len < 2 + ETH_HLEN) {
dev->net->stats.rx_errors++;
skb_pull(skb, pkt_len_plus_padd);
continue;
}
/* Trim this packet away from the SKB */
if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
/* last packet */
if (pkt_len_plus_padd == skb->len) {
skb_trim(skb, pkt_len);
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (!ax_skb)
return 0;
skb_trim(ax_skb, pkt_len);
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb->truesize = pkt_len_plus_padd +
SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
skb_pull(skb, pkt_len_plus_padd);
}
return 0;
}
static struct sk_buff *


@ -1318,6 +1318,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */


@ -2005,7 +2005,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (size) {
buf = kmalloc(size, GFP_KERNEL);
buf = kmalloc(size, GFP_NOIO);
if (!buf)
goto out;
}
@ -2037,7 +2037,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (data) {
buf = kmemdup(data, size, GFP_KERNEL);
buf = kmemdup(data, size, GFP_NOIO);
if (!buf)
goto out;
} else {


@ -3105,14 +3105,20 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
err = register_netdev(dev);
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
err = register_netdevice(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
rtnl_unlock();
goto free_failover;
}
virtio_device_ready(vdev);
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");


@ -63,6 +63,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
static bool __read_mostly xennet_trusted = true;
module_param_named(trusted, xennet_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");
#define XENNET_TIMEOUT (5 * HZ)
static const struct ethtool_ops xennet_ethtool_ops;
@ -163,6 +167,9 @@ struct netfront_info {
/* Is device behaving sane? */
bool broken;
/* Should skbs be bounced into a zeroed buffer? */
bool bounce;
atomic_t rx_gso_checksum_fixup;
};
@ -261,7 +268,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
if (unlikely(!skb))
return NULL;
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
if (!page) {
kfree_skb(skb);
return NULL;
@ -593,6 +600,34 @@ static void xennet_mark_tx_pending(struct netfront_queue *queue)
queue->tx_link[i] = TX_PENDING;
}
struct sk_buff *bounce_skb(const struct sk_buff *skb)
{
unsigned int headerlen = skb_headroom(skb);
/* Align size to allocate full pages and avoid contiguous data leaks */
unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
XEN_PAGE_SIZE);
struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
if (!n)
return NULL;
if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
WARN_ONCE(1, "misaligned skb allocated\n");
kfree_skb(n);
return NULL;
}
/* Set the data pointer */
skb_reserve(n, headerlen);
/* Set the tail pointer and length */
skb_put(n, skb->len);
BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
skb_copy_header(n, skb);
return n;
}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
@ -645,9 +680,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* The first req should be at least ETH_HLEN size or the packet will be
* dropped by netback.
*
* If the backend is not trusted bounce all data to zeroed pages to
* avoid exposing contiguous data on the granted page not belonging to
* the skb.
*/
if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
nskb = skb_copy(skb, GFP_ATOMIC);
if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
nskb = bounce_skb(skb);
if (!nskb)
goto drop;
dev_consume_skb_any(skb);
@ -1953,6 +1992,10 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
/* Check if backend is trusted. */
info->bounce = !xennet_trusted ||
!xenbus_read_unsigned(dev->nodename, "trusted", 1);
/* Check if backend supports multiple queues */
max_queues = xenbus_read_unsigned(info->xbdev->otherend,
"multi-queue-max-queues", 1);
@ -2106,6 +2149,9 @@ static int xennet_connect(struct net_device *dev)
err = talk_to_netback(np->xbdev, np);
if (err)
return err;
if (np->bounce)
dev_info(&np->xbdev->dev,
"bouncing transmitted data to zeroed pages\n");
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;


@ -186,9 +186,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
pdata->irq_polarity = IRQF_TRIGGER_RISING;
ret = irq_of_parse_and_map(node, 0);
if (ret < 0) {
pr_err("Unable to get irq, error: %d\n", ret);
return ret;
if (!ret) {
pr_err("Unable to get irq\n");
return -EINVAL;
}
pdata->irq = ret;


@ -129,9 +129,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node,
}
ret = irq_of_parse_and_map(node, 0);
if (ret < 0) {
pr_err("Unable to get irq, error: %d\n", ret);
return ret;
if (!ret) {
pr_err("Unable to get irq\n");
return -EINVAL;
}
pdata->irq = ret;


@ -178,6 +178,9 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
if (!header.plen)
return 0;
r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
if (r != header.plen) {
nfc_err(&client->dev,


@ -196,8 +196,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
/* make sure we are in the region */
if (ctx->phys < nd_region->ndr_start
|| (ctx->phys + ctx->cleared) > ndr_end)
if (ctx->phys < nd_region->ndr_start ||
(ctx->phys + ctx->cleared - 1) > ndr_end)
return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512;


@ -15,6 +15,8 @@
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <xen/interface/event_channel.h>
#include <xen/grant_table.h>
struct gntdev_dmabuf_priv;
@ -61,6 +63,7 @@ struct gntdev_grant_map {
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
struct gnttab_unmap_grant_ref *kunmap_ops;
bool *being_removed;
struct page **pages;
unsigned long pages_vm_start;
@ -78,6 +81,11 @@ struct gntdev_grant_map {
/* Needed to avoid allocation in gnttab_dma_free_pages(). */
xen_pfn_t *frames;
#endif
/* Number of live grants */
atomic_t live_grants;
/* Needed to avoid allocation in __unmap_grant_pages */
struct gntab_unmap_queue_data unmap_data;
};
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,


@ -64,11 +64,12 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
static atomic_t pages_mapped = ATOMIC_INIT(0);
/* True in PV mode, false otherwise */
static int use_ptemod;
#define populate_freeable_maps use_ptemod
static int unmap_grant_pages(struct gntdev_grant_map *map,
int offset, int pages);
static void unmap_grant_pages(struct gntdev_grant_map *map,
int offset, int pages);
static struct miscdevice gntdev_miscdev;
@ -125,6 +126,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
kfree(map->unmap_ops);
kfree(map->kmap_ops);
kfree(map->kunmap_ops);
kfree(map->being_removed);
kfree(map);
}
@ -144,12 +146,15 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
add->being_removed =
kcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
NULL == add->kmap_ops ||
NULL == add->kunmap_ops ||
NULL == add->pages)
NULL == add->pages ||
NULL == add->being_removed)
goto err;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
@ -245,6 +250,35 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
return;
atomic_sub(map->count, &pages_mapped);
if (map->pages && !use_ptemod) {
/*
* Increment the reference count. This ensures that the
* subsequent call to unmap_grant_pages() will not wind up
* re-entering itself. It *can* wind up calling
* gntdev_put_map() recursively, but such calls will be with a
* reference count greater than 1, so they will return before
* this code is reached. The recursion depth is thus limited to
* 1. Do NOT use refcount_inc() here, as it will detect that
* the reference count is zero and WARN().
*/
refcount_set(&map->users, 1);
/*
* Unmap the grants. This may or may not be asynchronous, so it
* is possible that the reference count is 1 on return, but it
* could also be greater than 1.
*/
unmap_grant_pages(map, 0, map->count);
/* Check if the memory now needs to be freed */
if (!refcount_dec_and_test(&map->users))
return;
/*
* All pages have been returned to the hypervisor, so free the
* map.
*/
}
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
@ -302,6 +336,7 @@ static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
size_t alloced = 0;
int i, err = 0;
if (!use_ptemod) {
@ -350,87 +385,109 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
map->pages, map->count);
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status == GNTST_okay)
if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
else if (!err)
if (!use_ptemod)
alloced++;
} else if (!err)
err = -EINVAL;
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
if (use_ptemod) {
if (map->kmap_ops[i].status == GNTST_okay)
if (map->kmap_ops[i].status == GNTST_okay) {
if (map->map_ops[i].status == GNTST_okay)
alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
else if (!err)
} else if (!err)
err = -EINVAL;
}
}
atomic_add(alloced, &map->live_grants);
return err;
}
static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
int pages)
static void __unmap_grant_pages_done(int result,
struct gntab_unmap_queue_data *data)
{
int i, err = 0;
struct gntab_unmap_queue_data unmap_data;
unsigned int i;
struct gntdev_grant_map *map = data->data;
unsigned int offset = data->unmap_ops - map->unmap_ops;
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
if (pgno >= offset && pgno < offset + pages) {
/* No need for kmap, pages are in lowmem */
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
}
}
unmap_data.unmap_ops = map->unmap_ops + offset;
unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
unmap_data.pages = map->pages + offset;
unmap_data.count = pages;
err = gnttab_unmap_refs_sync(&unmap_data);
if (err)
return err;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
err = -EINVAL;
for (i = 0; i < data->count; i++) {
WARN_ON(map->unmap_ops[offset+i].status);
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = -1;
}
return err;
/*
* Decrease the live-grant counter. This must happen after the loop to
* prevent premature reuse of the grants by gnttab_mmap().
*/
atomic_sub(data->count, &map->live_grants);
/* Release reference taken by __unmap_grant_pages */
gntdev_put_map(NULL, map);
}
static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
int pages)
static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
int pages)
{
int range, err = 0;
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
if (pgno >= offset && pgno < offset + pages) {
/* No need for kmap, pages are in lowmem */
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
}
}
map->unmap_data.unmap_ops = map->unmap_ops + offset;
map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
map->unmap_data.pages = map->pages + offset;
map->unmap_data.count = pages;
map->unmap_data.done = __unmap_grant_pages_done;
map->unmap_data.data = map;
refcount_inc(&map->users); /* to keep map alive during async call below */
gnttab_unmap_refs_async(&map->unmap_data);
}
static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
int pages)
{
int range;
if (atomic_read(&map->live_grants) == 0)
return; /* Nothing to do */
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
/* It is possible the requested range will have a "hole" where we
* already unmapped some of the grants. Only unmap valid ranges.
*/
while (pages && !err) {
while (pages && map->unmap_ops[offset].handle == -1) {
while (pages) {
while (pages && map->being_removed[offset]) {
offset++;
pages--;
}
range = 0;
while (range < pages) {
if (map->unmap_ops[offset+range].handle == -1)
if (map->being_removed[offset + range])
break;
map->being_removed[offset + range] = true;
range++;
}
err = __unmap_grant_pages(map, offset, range);
if (range)
__unmap_grant_pages(map, offset, range);
offset += range;
pages -= range;
}
return err;
}
/* ------------------------------------------------------------------ */
@ -500,7 +557,6 @@ static int unmap_if_in_range(struct gntdev_grant_map *map,
bool blockable)
{
unsigned long mstart, mend;
int err;
if (!in_range(map, start, end))
return 0;
@ -514,10 +570,9 @@ static int unmap_if_in_range(struct gntdev_grant_map *map,
map->index, map->count,
map->vma->vm_start, map->vma->vm_end,
start, end, mstart, mend);
err = unmap_grant_pages(map,
unmap_grant_pages(map,
(mstart - map->vma->vm_start) >> PAGE_SHIFT,
(mend - mstart) >> PAGE_SHIFT);
WARN_ON(err);
return 0;
}
@ -558,7 +613,6 @@ static void mn_release(struct mmu_notifier *mn,
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
struct gntdev_grant_map *map;
int err;
mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@ -567,8 +621,7 @@ static void mn_release(struct mmu_notifier *mn,
pr_debug("map %d+%d (%lx %lx)\n",
map->index, map->count,
map->vma->vm_start, map->vma->vm_end);
err = unmap_grant_pages(map, /* offset */ 0, map->count);
WARN_ON(err);
unmap_grant_pages(map, /* offset */ 0, map->count);
}
list_for_each_entry(map, &priv->freeable_maps, next) {
if (!map->vma)
@ -576,8 +629,7 @@ static void mn_release(struct mmu_notifier *mn,
pr_debug("map %d+%d (%lx %lx)\n",
map->index, map->count,
map->vma->vm_start, map->vma->vm_end);
err = unmap_grant_pages(map, /* offset */ 0, map->count);
WARN_ON(err);
unmap_grant_pages(map, /* offset */ 0, map->count);
}
mutex_unlock(&priv->lock);
}
@ -1113,6 +1165,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
}
if (atomic_read(&map->live_grants)) {
err = -EAGAIN;
goto unlock_out;
}
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;


@ -415,7 +415,6 @@ int __net_init seg6_hmac_net_init(struct net *net)
return 0;
}
EXPORT_SYMBOL(seg6_hmac_net_init);
void seg6_hmac_exit(void)
{


@ -314,9 +314,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) :
NULL;
rcu_read_lock();
ca = t->prl_count < cmax ? t->prl_count : cmax;
ca = min(t->prl_count, cmax);
if (!kp) {
/* We don't try hard to allocate much memory for
@ -331,7 +329,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
}
}
c = 0;
rcu_read_lock();
for_each_prl_rcu(t->prl) {
if (c >= cmax)
break;
@ -343,7 +341,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
if (kprl.addr != htonl(INADDR_ANY))
break;
}
out:
rcu_read_unlock();
len = sizeof(*kp) * c;
@ -352,7 +350,7 @@ out:
ret = -EFAULT;
kfree(kp);
out:
return ret;
}


@ -145,6 +145,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
/* Another cpu may race to insert the element with the same key */
if (prev) {
nft_set_elem_destroy(set, he, true);
atomic_dec(&set->nelems);
he = prev;
}
@ -154,6 +155,7 @@ out:
err2:
nft_set_elem_destroy(set, he, true);
atomic_dec(&set->nelems);
err1:
return false;
}


@ -34,89 +34,89 @@ static void rose_idletimer_expiry(struct timer_list *);
void rose_start_heartbeat(struct sock *sk)
{
del_timer(&sk->sk_timer);
sk_stop_timer(sk, &sk->sk_timer);
sk->sk_timer.function = rose_heartbeat_expiry;
sk->sk_timer.expires = jiffies + 5 * HZ;
add_timer(&sk->sk_timer);
sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires);
}
void rose_start_t1timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t1;
add_timer(&rose->timer);
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_t2timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t2;
add_timer(&rose->timer);
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_t3timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t3;
add_timer(&rose->timer);
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_hbtimer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->hb;
add_timer(&rose->timer);
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_idletimer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->idletimer);
sk_stop_timer(sk, &rose->idletimer);
if (rose->idle > 0) {
rose->idletimer.function = rose_idletimer_expiry;
rose->idletimer.expires = jiffies + rose->idle;
add_timer(&rose->idletimer);
sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires);
}
}
void rose_stop_heartbeat(struct sock *sk)
{
del_timer(&sk->sk_timer);
sk_stop_timer(sk, &sk->sk_timer);
}
void rose_stop_timer(struct sock *sk)
{
del_timer(&rose_sk(sk)->timer);
sk_stop_timer(sk, &rose_sk(sk)->timer);
}
void rose_stop_idletimer(struct sock *sk)
{
del_timer(&rose_sk(sk)->idletimer);
sk_stop_timer(sk, &rose_sk(sk)->idletimer);
}
static void rose_heartbeat_expiry(struct timer_list *t)
@ -133,6 +133,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
bh_unlock_sock(sk);
rose_destroy_socket(sk);
sock_put(sk);
return;
}
break;
@ -155,6 +156,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
rose_start_heartbeat(sk);
bh_unlock_sock(sk);
sock_put(sk);
}
static void rose_timer_expiry(struct timer_list *t)
@ -184,6 +186,7 @@ static void rose_timer_expiry(struct timer_list *t)
break;
}
bh_unlock_sock(sk);
sock_put(sk);
}
static void rose_idletimer_expiry(struct timer_list *t)
@ -208,4 +211,5 @@ static void rose_idletimer_expiry(struct timer_list *t)
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
sock_put(sk);
}


@ -544,7 +544,7 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
*/
xdr->p = (void *)p + frag2bytes;
space_left = xdr->buf->buflen - xdr->buf->len;
if (space_left - nbytes >= PAGE_SIZE)
if (space_left - frag1bytes >= PAGE_SIZE)
xdr->end = (void *)p + PAGE_SIZE;
else
xdr->end = (void *)p + space_left - frag1bytes;


@ -57,7 +57,7 @@ run_all() {
run_udp "${ipv4_args}"
echo "ipv6"
run_tcp "${ipv4_args}"
run_tcp "${ipv6_args}"
run_udp "${ipv6_args}"
}