Re: Linux 5.4.201
From: Greg Kroah-Hartman
Date: Sat Jun 25 2022 - 09:19:38 EST
diff --git a/Documentation/hwmon/hwmon-kernel-api.rst b/Documentation/hwmon/hwmon-kernel-api.rst
index 23f27fe78e37..c41eb6108103 100644
--- a/Documentation/hwmon/hwmon-kernel-api.rst
+++ b/Documentation/hwmon/hwmon-kernel-api.rst
@@ -72,7 +72,7 @@ hwmon_device_register_with_info is the most comprehensive and preferred means
to register a hardware monitoring device. It creates the standard sysfs
attributes in the hardware monitoring core, letting the driver focus on reading
from and writing to the chip instead of having to bother with sysfs attributes.
-The parent device parameter as well as the chip parameter must not be NULL. Its
+The parent device parameter cannot be NULL with non-NULL chip info. Its
parameters are described in more detail below.
devm_hwmon_device_register_with_info is similar to
diff --git a/Makefile b/Makefile
index 32da9117e9d7..75be5870cc55 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 200
+SUBLEVEL = 201
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601..7b054c67acd8 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -228,8 +228,6 @@ ENDPIPROC(__dma_flush_area)
* - dir - DMA direction
*/
ENTRY(__dma_map_area)
- cmp w2, #DMA_FROM_DEVICE
- b.eq __dma_inv_area
b __dma_clean_area
ENDPIPROC(__dma_map_area)
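The idea behind this change: invalidating a DMA_FROM_DEVICE buffer at map time can discard dirty cache lines covering data the device will not fully overwrite, corrupting it; it is safer to only clean (write back) when mapping and leave invalidation to the unmap path, after the device has written. A rough C sketch of the resulting policy; the helper names are hypothetical stand-ins for the __dma_clean_area/__dma_inv_area assembly, not real kernel symbols:

#include <linux/dma-direction.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the arm64 assembly routines. */
void clean_dcache_range(void *start, size_t size);
void invalidate_dcache_range(void *start, size_t size);

static void dma_map_area_sketch(void *start, size_t size,
				enum dma_data_direction dir)
{
	/* Always clean at map time; never invalidate here. */
	clean_dcache_range(start, size);
}

static void dma_unmap_area_sketch(void *start, size_t size,
				  enum dma_data_direction dir)
{
	/* Discard stale lines only after the device has written. */
	if (dir != DMA_TO_DEVICE)
		invalidate_dcache_range(start, size);
}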
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4438c00acb65..28ca07360e97 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -716,7 +716,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
- page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+ page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
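For context, the third argument of page_set_storage_key() selects the SSKE variant, so passing 0 here picks the non-quiescing form and avoids the system-wide quiescing overhead of a plain SSKE. The 5.4 helper in arch/s390/include/asm/page.h is approximately the following (an approximation for illustration, not a verbatim quote):

static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		/* SSKE with the NQ (non-quiescing) bit set in m3 */
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}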
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c73b93b9bb87..a2175394cd25 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -715,12 +715,11 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
/**
* hwmon_device_register_with_info - register w/ hwmon
- * @dev: the parent device (mandatory)
- * @name: hwmon name attribute (mandatory)
- * @drvdata: driver data to attach to created device (optional)
- * @chip: pointer to hwmon chip information (mandatory)
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: pointer to hwmon chip information
* @extra_groups: pointer to list of additional non-standard attribute groups
- * (optional)
*
* hwmon_device_unregister() must be called when the device is no
* longer needed.
@@ -733,10 +732,13 @@ hwmon_device_register_with_info(struct device *dev, const char *name,
const struct hwmon_chip_info *chip,
const struct attribute_group **extra_groups)
{
- if (!dev || !name || !chip)
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
return ERR_PTR(-EINVAL);
- if (!chip->ops || !chip->ops->is_visible || !chip->info)
+ if (chip && !dev)
return ERR_PTR(-EINVAL);
return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
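After this revert the contract is: @name is always required; if @chip is provided it must be complete (ops, is_visible, info) and a parent @dev becomes mandatory; a NULL @chip is accepted again. A minimal caller sketch under those rules (all demo_* names and values invented for illustration):

#include <linux/hwmon.h>

static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return 0444;			/* everything read-only */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	*val = 42000;			/* fake temp1_input, millidegrees C */
	return 0;
}

static const struct hwmon_ops demo_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
};

static const struct hwmon_channel_info *demo_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info demo_chip = {
	.ops = &demo_ops,
	.info = demo_info,
};

/* In probe: non-NULL chip info, so the parent device is mandatory:	*/
/*	hwmon = hwmon_device_register_with_info(dev, "demo", NULL,	*/
/*						&demo_chip, NULL);	*/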
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 06b382304d92..81bc36a43b32 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -872,8 +872,7 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
- table_type == DM_TYPE_DAX_BIO_BASED ||
- table_type == DM_TYPE_NVME_BIO_BASED);
+ table_type == DM_TYPE_DAX_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -929,8 +928,6 @@ bool dm_table_supports_dax(struct dm_table *t,
return true;
}
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-
static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -960,7 +957,6 @@ static int dm_table_determine_type(struct dm_table *t)
goto verify_bio_based;
}
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
- BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
goto verify_rq_based;
}
@@ -999,15 +995,6 @@ static int dm_table_determine_type(struct dm_table *t)
if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
- } else {
- /* Check if upgrading to NVMe bio-based is valid or required */
- tgt = dm_table_get_immutable_target(t);
- if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
- t->type = DM_TYPE_NVME_BIO_BASED;
- goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
- } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
- t->type = DM_TYPE_NVME_BIO_BASED;
- }
}
return 0;
}
@@ -1024,8 +1011,7 @@ static int dm_table_determine_type(struct dm_table *t)
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
- DMERR("%s DM doesn't support multiple targets",
- t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
+ DMERR("request-based DM doesn't support multiple targets");
return -EINVAL;
}
@@ -1714,20 +1700,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
return q && !blk_queue_add_random(q);
}
-static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- char b[BDEVNAME_SIZE];
-
- /* For now, NVMe devices are the only devices of this class */
- return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
-}
-
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
-{
- return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
-}
-
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 37b8bb4d80f0..77e28f77c59f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1000,7 +1000,7 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+ if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bio->bi_disk->queue->limits.max_discard_sectors)
disable_discard(md);
@@ -1325,7 +1325,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
sector = clone->bi_iter.bi_sector;
if (unlikely(swap_bios_limit(ti, clone))) {
- struct mapped_device *md = io->md;
int latch = get_swap_bios();
if (unlikely(latch != md->swap_bios))
__set_swap_bios_limit(md, latch);
@@ -1340,24 +1339,17 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
bio_dev(io->orig_bio), sector);
- if (md->type == DM_TYPE_NVME_BIO_BASED)
- ret = direct_make_request(clone);
- else
- ret = generic_make_request(clone);
+ ret = generic_make_request(clone);
break;
case DM_MAPIO_KILL:
- if (unlikely(swap_bios_limit(ti, clone))) {
- struct mapped_device *md = io->md;
+ if (unlikely(swap_bios_limit(ti, clone)))
up(&md->swap_bios_semaphore);
- }
free_tio(tio);
dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
- if (unlikely(swap_bios_limit(ti, clone))) {
- struct mapped_device *md = io->md;
+ if (unlikely(swap_bios_limit(ti, clone)))
up(&md->swap_bios_semaphore);
- }
free_tio(tio);
dec_pending(io, BLK_STS_DM_REQUEUE);
break;
@@ -1732,51 +1724,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret;
}
-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
- struct bio *bio, struct dm_target *ti)
-{
- struct clone_info ci;
- blk_qc_t ret = BLK_QC_T_NONE;
- int error = 0;
-
- init_clone_info(&ci, md, map, bio);
-
- if (bio->bi_opf & REQ_PREFLUSH) {
- struct bio flush_bio;
-
- /*
- * Use an on-stack bio for this, it's safe since we don't
- * need to reference it after submit. It's just used as
- * the basis for the clone(s).
- */
- bio_init(&flush_bio, NULL, 0);
- flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- ci.bio = &flush_bio;
- ci.sector_count = 0;
- error = __send_empty_flush(&ci);
- bio_uninit(ci.bio);
- /* dec_pending submits any data associated with flush */
- } else {
- struct dm_target_io *tio;
-
- ci.bio = bio;
- ci.sector_count = bio_sectors(bio);
- if (__process_abnormal_io(&ci, ti, &error))
- goto out;
-
- tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
- ret = __clone_and_map_simple_bio(&ci, tio, NULL);
- }
-out:
- /* drop the extra reference count */
- dec_pending(ci.io, errno_to_blk_status(error));
- return ret;
-}
-
static blk_qc_t dm_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
@@ -1807,8 +1754,6 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
/* regular IO is split by __split_and_process_bio */
}
- if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
- return __process_bio(md, map, bio, ti);
return __split_and_process_bio(md, map, bio);
}
@@ -2200,12 +2145,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (request_based)
dm_stop_queue(q);
- if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+ if (request_based) {
/*
- * Leverage the fact that request-based DM targets and
- * NVMe bio based targets are immutable singletons
- * - used to optimize both dm_request_fn and dm_mq_queue_rq;
- * and __process_bio.
+ * Leverage the fact that request-based DM targets are
+ * immutable singletons - used to optimize dm_mq_queue_rq.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
@@ -2334,7 +2277,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- case DM_TYPE_NVME_BIO_BASED:
dm_init_congested_fn(md);
break;
case DM_TYPE_NONE:
@@ -3070,7 +3012,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- case DM_TYPE_NVME_BIO_BASED:
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
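On the "immutable singletons" comment above: a request-based table holds exactly one target that never changes for the life of the table, so the hot path can cache it and skip the live-table lookup. A standalone illustration with simplified, made-up types (the real field is mapped_device::immutable_target):

#include <stddef.h>

struct dm_target;			/* opaque for this sketch */

struct mapped_device_sketch {
	/* Cached in __bind() only for request-based tables, which are
	 * guaranteed to contain a single, never-replaced target. */
	struct dm_target *immutable_target;
};

static struct dm_target *fast_find_target(struct mapped_device_sketch *md)
{
	if (md->immutable_target)
		return md->immutable_target;	/* no table lock or lookup */
	return NULL;	/* bio-based: caller consults the live table */
}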
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 271bd08f4a25..3f053b11e2ce 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -772,9 +772,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
dev->qmult = qmult;
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
- if (get_ether_addr(dev_addr, net->dev_addr))
+ if (get_ether_addr(dev_addr, net->dev_addr)) {
+ net->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(&g->dev,
"using random %s ethernet address\n", "self");
+ } else {
+ net->addr_assign_type = NET_ADDR_SET;
+ }
if (get_ether_addr(host_addr, dev->host_mac))
dev_warn(&g->dev,
"using random %s ethernet address\n", "host");
@@ -831,6 +835,9 @@ struct net_device *gether_setup_name_default(const char *netname)
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
+ /* by default we always have a random MAC address */
+ net->addr_assign_type = NET_ADDR_RANDOM;
+
skb_queue_head_init(&dev->rx_frames);
/* network device setup */
@@ -868,7 +875,6 @@ int gether_register_netdev(struct net_device *net)
g = dev->gadget;
memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
- net->addr_assign_type = NET_ADDR_RANDOM;
status = register_netdev(net);
if (status < 0) {
@@ -908,6 +914,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+ net->addr_assign_type = NET_ADDR_SET;
return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);
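The user-visible effect is the addr_assign_type attribute in sysfs: an explicitly configured gadget MAC now reports NET_ADDR_SET (3) rather than NET_ADDR_RANDOM (1), so userspace that keys off this attribute (e.g. MAC-address policies in network managers) stops overwriting it. A small userspace check, assuming the interface is named "usb0":

#include <stdio.h>

int main(void)
{
	/* 0 = NET_ADDR_PERM, 1 = NET_ADDR_RANDOM, 3 = NET_ADDR_SET */
	FILE *f = fopen("/sys/class/net/usb0/addr_assign_type", "r");
	int type;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("addr_assign_type = %d\n", type);
	return 0;
}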
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a53d7d2c2d95..60631f3abddb 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -28,7 +28,6 @@ enum dm_queue_mode {
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_DAX_BIO_BASED = 3,
- DM_TYPE_NVME_BIO_BASED = 4,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
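Because every enumerator here carries an explicit value, removing DM_TYPE_NVME_BIO_BASED = 4 does not renumber anything; the surviving queue modes keep their values (DM_TYPE_NONE sits above the hunk context and is assumed unchanged):

enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};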
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 959f4f0c8546..d9bee15e36a5 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -675,12 +675,14 @@ EXPORT_SYMBOL_GPL(inet_unhash);
* Note that we use 32bit integers (vs RFC 'short integers')
* because 2^16 is not a multiple of num_ephemeral and this
* property might be used by clever attacker.
- * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
- * we use 256 instead to really give more isolation and
- * privacy, this only consumes 1 KB of kernel memory.
+ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
+ * attacks were since demonstrated, thus we use 65536 instead to really
+ * give more isolation and privacy, at the expense of 256kB of kernel
+ * memory.
*/
-#define INET_TABLE_PERTURB_SHIFT 8
-static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
+#define INET_TABLE_PERTURB_SHIFT 16
+#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
+static u32 *table_perturb;
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
@@ -723,10 +725,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
if (likely(remaining > 1))
remaining &= ~1U;
- net_get_random_once(table_perturb, sizeof(table_perturb));
- index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+ net_get_random_once(table_perturb,
+ INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+ index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
- offset = READ_ONCE(table_perturb[index]) + port_offset;
+ offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
offset %= remaining;
/* In first pass we try ports of @low parity.
@@ -782,6 +785,12 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
return -EADDRNOTAVAIL;
ok:
+ /* Here we want to add a little bit of randomness to the next source
+ * port that will be chosen. We use a max() with a random here so that
+ * on low contention the randomness is maximal and on high contention
+ * it may be inexistent.
+ */
+ i = max_t(int, i, (prandom_u32() & 7) * 2);
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
@@ -855,6 +864,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
low_limit,
high_limit);
init_hashinfo_lhash2(h);
+
+ /* this one is used for source ports of outgoing connections */
+ table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
+ sizeof(*table_perturb), GFP_KERNEL);
+ if (!table_perturb)
+ panic("TCP: failed to alloc table_perturb");
}
int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
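Taken together, these tcp changes split the 64-bit port_offset so that the low bits pick the perturbation bucket directly while only the high 32 bits feed the starting offset, and each successful connect advances the bucket by i + 2, where i is first raised to a small random even value. A standalone C sketch of that arithmetic (user-space approximation; prandom_u32() replaced by rand()):

#include <stdint.h>
#include <stdlib.h>

#define INET_TABLE_PERTURB_SHIFT 16
#define INET_TABLE_PERTURB_SIZE  (1u << INET_TABLE_PERTURB_SHIFT)

static uint32_t table_perturb_sketch[INET_TABLE_PERTURB_SIZE];

/* Pick the starting offset into the ephemeral port range. */
static uint32_t pick_offset(uint64_t port_offset, uint32_t remaining,
			    uint32_t *indexp)
{
	/* Low bits select the bucket; only the high half feeds the
	 * offset, so observed ports do not expose the bucket index. */
	uint32_t index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	*indexp = index;
	return (table_perturb_sketch[index] + (uint32_t)(port_offset >> 32))
		% remaining;
}

/* After a free port is found at scan distance i, advance the bucket. */
static void advance_bucket(uint32_t index, int i)
{
	int r = (rand() & 7) * 2;	/* small random even increment */

	if (i < r)	/* max(): full randomness under low contention */
		i = r;
	table_perturb_sketch[index] += i + 2;
}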