From e98f98086848502c12e7b96f4d060b451b75408a Mon Sep 17 00:00:00 2001 From: Petr Tesarik Date: Mar 20 2020 14:56:51 +0000 Subject: Merge branch 'fixes/linux-4.12' into SLE15-SP1 - Refresh patches.suse/blktrace-Protect-q-blk_trace-with-RCU.patch - Refresh patches.suse/blktrace-fix-unlocked-access-to-init-start-stop-tear.patch - Drop patches.kabi/blk_queue_blk_trace_mutex_kabi.patch Conflicts: blacklist.conf patches.suse/blktrace-Fix-potential-deadlock-between-delete-sysfs.patch patches.suse/tracing-blktrace-Fix-to-allow-setting-same-value.patch series.conf --- diff --git a/blacklist.conf b/blacklist.conf index b169030..8f7d49d 100644 --- a/blacklist.conf +++ b/blacklist.conf @@ -509,7 +509,6 @@ b60706644282af04e4aa57da5af57470d453cd1f # vsprintf: cosmetic 741a76b350897604c48fb12beff1c9b77724dc96 # kthread: fixes rather rare races in CPU hotplug; there are several followup fixes on top of it to get it actually right; does not worth the risk 4950276672fce5c241857540f8561c440663673d # kmemcheck removal; not for released products d8be75663cec0069b85f80191abd2682ce4a512f # related to kmemcheck removal; not for released products -1f2cac107c591c24b60b115d6050adc213d10fc0 # blktrace: racy init/start-stop/teardown for ages; not worth it a6da0024ffc19e0d47712bb5ca4fd083f76b07df # blktrace: fix unlocked registration of tracepoints; racy for ages; found by syzcaller; not worth it 6b7e633fe9c24682df550e5311f47fb524701586 # ring_buffer: just an optimization 23721a755f98ac846897a013c92cccb281c1bcc8 # trace/xdp: compilation warning; we do not have the affected code @@ -1469,3 +1468,6 @@ ba16a48af797db124ac100417f9229b1650ce1fb # Duplicate of f0f3a6cecf3b98990985cd42 775d78319f1ceb32be8eb3b1202ccdc60e9cb7f1 # breaks KABI a1bd079fca6219e18bb0892f0a7228a76dd6292c # in fact fixes a864e29d94abac7f9756e07180c167f50fb00042 8fc7036ee652207ca992fbb9abb64090c355a9e0 # in fact fixes 7f0bf1c09f3c8763a9807f66dcf4c7adad0ce7ff +1b710b1b10eff9d46666064ea25f079f70bc67a8 # DEBUG_RANDOM_BOOT is 0 
+ca4b43c14cd88d28cfc6467d2fa075aad6818f1d # kABI +13380a1471aadc517994b7230371a227d1f9f152 # kABI, cosmetic patch diff --git a/patches.suse/0001-crypto-pcrypt-Fix-user-after-free-on-module-unload.patch b/patches.suse/0001-crypto-pcrypt-Fix-user-after-free-on-module-unload.patch new file mode 100644 index 0000000..8ad7f8c --- /dev/null +++ b/patches.suse/0001-crypto-pcrypt-Fix-user-after-free-on-module-unload.patch @@ -0,0 +1,37 @@ +From 07bfd9bdf568a38d9440c607b72342036011f727 Mon Sep 17 00:00:00 2001 +From: Herbert Xu +Date: Tue, 19 Nov 2019 17:41:31 +0800 +Subject: [PATCH] crypto: pcrypt - Fix user-after-free on module unload +Git-commit: 07bfd9bdf568a38d9440c607b72342036011f727 +References: git-fixes +Patch-mainline: v5.6-rc1 + +On module unload of pcrypt we must unregister the crypto algorithms +first and then tear down the padata structure. As otherwise the +crypto algorithms are still alive and can be used while the padata +structure is being freed. + +Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto...") +Cc: +Signed-off-by: Herbert Xu +Signed-off-by: Oliver Neukum +--- + crypto/pcrypt.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/crypto/pcrypt.c ++++ b/crypto/pcrypt.c +@@ -505,11 +505,12 @@ err: + + static void __exit pcrypt_exit(void) + { ++ crypto_unregister_template(&pcrypt_tmpl); ++ + pcrypt_fini_padata(&pencrypt); + pcrypt_fini_padata(&pdecrypt); + + kset_unregister(pcrypt_kset); +- crypto_unregister_template(&pcrypt_tmpl); + } + + module_init(pcrypt_init); diff --git a/patches.suse/0001-padata-always-acquire-cpu_hotplug_lock-before-pinst-.patch b/patches.suse/0001-padata-always-acquire-cpu_hotplug_lock-before-pinst-.patch new file mode 100644 index 0000000..d8f7f80 --- /dev/null +++ b/patches.suse/0001-padata-always-acquire-cpu_hotplug_lock-before-pinst-.patch @@ -0,0 +1,70 @@ +From 38228e8848cd7dd86ccb90406af32de0cad24be3 Mon Sep 17 00:00:00 2001 +From: Daniel Jordan +Date: Tue, 3 Dec 2019 14:31:11 -0500 +Subject: 
[PATCH] padata: always acquire cpu_hotplug_lock before pinst->lock +Git-commit: 38228e8848cd7dd86ccb90406af32de0cad24be3 +References: git-fixes +Patch-mainline: v5.6-rc1 + +lockdep complains when padata's paths to update cpumasks via CPU hotplug +and sysfs are both taken: + + # echo 0 > /sys/devices/system/cpu/cpu1/online + # echo ff > /sys/kernel/pcrypt/pencrypt/parallel_cpumask + + ====================================================== + WARNING: possible circular locking dependency detected + 5.4.0-rc8-padata-cpuhp-v3+ #1 Not tainted + ------------------------------------------------------ + bash/205 is trying to acquire lock: + ffffffff8286bcd0 (cpu_hotplug_lock.rw_sem){++++}, at: padata_set_cpumask+0x2b/0x120 + + but task is already holding lock: + ffff8880001abfa0 (&pinst->lock){+.+.}, at: padata_set_cpumask+0x26/0x120 + + which lock already depends on the new lock. + +padata doesn't take cpu_hotplug_lock and pinst->lock in a consistent +order. Which should be first? CPU hotplug calls into padata with +cpu_hotplug_lock already held, so it should have priority. 
+ +Fixes: 6751fb3c0e0c ("padata: Use get_online_cpus/put_online_cpus") +Signed-off-by: Daniel Jordan +Cc: Eric Biggers +Cc: Herbert Xu +Cc: Steffen Klassert +Cc: linux-crypto@vger.kernel.org +Cc: linux-kernel@vger.kernel.org +Signed-off-by: Herbert Xu +Signed-off-by: Oliver Neukum +--- + kernel/padata.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/kernel/padata.c b/kernel/padata.c +index 1e6500d64846..f5964f015139 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -643,8 +643,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, + struct cpumask *serial_mask, *parallel_mask; + int err = -EINVAL; + +- mutex_lock(&pinst->lock); + get_online_cpus(); ++ mutex_lock(&pinst->lock); + + switch (cpumask_type) { + case PADATA_CPU_PARALLEL: +@@ -662,8 +662,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, + err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); + + out: +- put_online_cpus(); + mutex_unlock(&pinst->lock); ++ put_online_cpus(); + + return err; + } +-- +2.16.4 + diff --git a/patches.suse/blktrace-Fix-potential-deadlock-between-delete-sysfs.patch b/patches.suse/blktrace-Fix-potential-deadlock-between-delete-sysfs.patch index 612e2a0..44df911 100644 --- a/patches.suse/blktrace-Fix-potential-deadlock-between-delete-sysfs.patch +++ b/patches.suse/blktrace-Fix-potential-deadlock-between-delete-sysfs.patch @@ -1,9 +1,10 @@ +From 5acb3cc2c2e9d3020a4fee43763c6463767f1572 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 20 Sep 2017 13:12:20 -0600 Subject: [PATCH] blktrace: Fix potential deadlock between delete & sysfs ops Git-commit: 5acb3cc2c2e9d3020a4fee43763c6463767f1572 Patch-mainline: v4.14-rc3 -References: bsc#1104967,FATE#325924 +References: bsc#1104967,bsc#1159285,FATE#325924 CVE-2019-19768 The lockdep code had reported the following unsafe locking scenario: @@ -43,7 +44,8 @@ Fix typo in patch subject line, and prune a comment detailing how the code used to work. 
Signed-off-by: Jens Axboe -Acked-by: Hannes Reinecke +Acked-by: Jan Kara + --- block/blk-core.c | 3 +++ include/linux/blkdev.h | 1 + diff --git a/patches.suse/blktrace-Protect-q-blk_trace-with-RCU.patch b/patches.suse/blktrace-Protect-q-blk_trace-with-RCU.patch new file mode 100644 index 0000000..10262e4 --- /dev/null +++ b/patches.suse/blktrace-Protect-q-blk_trace-with-RCU.patch @@ -0,0 +1,401 @@ +From c780e86dd48ef6467a1146cf7d0fe1e05a635039 Mon Sep 17 00:00:00 2001 +From: Jan Kara +Date: Thu, 6 Feb 2020 15:28:12 +0100 +Subject: [PATCH] blktrace: Protect q->blk_trace with RCU +Git-commit: c780e86dd48ef6467a1146cf7d0fe1e05a635039 +Patch-mainline: v5.6-rc4 +References: bsc#1159285 CVE-2019-19768 + +KASAN is reporting that __blk_add_trace() has a use-after-free issue +when accessing q->blk_trace. Indeed the switching of block tracing (and +thus eventual freeing of q->blk_trace) is completely unsynchronized with +the currently running tracing and thus it can happen that the blk_trace +structure is being freed just while __blk_add_trace() works on it. +Protect accesses to q->blk_trace by RCU during tracing and make sure we +wait for the end of RCU grace period when shutting down tracing. Luckily +that is rare enough event that we can afford that. Note that postponing +the freeing of blk_trace to an RCU callback should better be avoided as +it could have unexpected user visible side-effects as debugfs files +would be still existing for a short while block tracing has been shut +down. 
+ +Link: https://bugzilla.kernel.org/show_bug.cgi?id=205711 +Cc: stable@vger.kernel.org +Reviewed-by: Chaitanya Kulkarni +Reviewed-by: Ming Lei +Tested-by: Ming Lei +Reviewed-by: Bart Van Assche +Reported-by: Tristan Madani +Signed-off-by: Jan Kara +Signed-off-by: Jens Axboe +Acked-by: Jan Kara + +--- + include/linux/blkdev.h | 2 + include/linux/blktrace_api.h | 17 ++++-- + kernel/trace/blktrace.c | 109 +++++++++++++++++++++++++++++++------------ + 3 files changed, 92 insertions(+), 36 deletions(-) + +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -624,7 +624,7 @@ struct request_queue { + unsigned int sg_reserved_size; + int node; + #ifdef CONFIG_BLK_DEV_IO_TRACE +- struct blk_trace *blk_trace; ++ struct blk_trace __rcu *blk_trace; + struct mutex blk_trace_mutex; + #endif + /* +--- a/include/linux/blktrace_api.h ++++ b/include/linux/blktrace_api.h +@@ -50,9 +50,12 @@ void __trace_note_message(struct blk_tra + **/ + #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ + do { \ +- struct blk_trace *bt = (q)->blk_trace; \ ++ struct blk_trace *bt; \ ++ rcu_read_lock(); \ ++ bt = rcu_dereference((q)->blk_trace); \ + if (unlikely(bt)) \ + __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ ++ rcu_read_unlock(); \ + } while (0) + #define blk_add_trace_msg(q, fmt, ...) 
\ + blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) +@@ -60,10 +63,14 @@ void __trace_note_message(struct blk_tra + + static inline bool blk_trace_note_message_enabled(struct request_queue *q) + { +- struct blk_trace *bt = q->blk_trace; +- if (likely(!bt)) +- return false; +- return bt->act_mask & BLK_TC_NOTIFY; ++ struct blk_trace *bt; ++ bool ret; ++ ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); ++ ret = bt && (bt->act_mask & BLK_TC_NOTIFY); ++ rcu_read_unlock(); ++ return ret; + } + + extern void blk_add_driver_data(struct request_queue *q, struct request *rq, +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -331,6 +331,7 @@ static void blk_trace_free(struct blk_tr + + static void blk_trace_cleanup(struct blk_trace *bt) + { ++ synchronize_rcu(); + blk_trace_free(bt); + if (atomic_dec_and_test(&blk_probes_ref)) + blk_unregister_tracepoints(); +@@ -633,8 +634,10 @@ static int compat_blk_trace_setup(struct + static int __blk_trace_startstop(struct request_queue *q, int start) + { + int ret; +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + ++ bt = rcu_dereference_protected(q->blk_trace, ++ lockdep_is_held(&q->blk_trace_mutex)); + if (bt == NULL) + return -EINVAL; + +@@ -743,8 +746,8 @@ int blk_trace_ioctl(struct block_device + void blk_trace_shutdown(struct request_queue *q) + { + mutex_lock(&q->blk_trace_mutex); +- +- if (q->blk_trace) { ++ if (rcu_dereference_protected(q->blk_trace, ++ lockdep_is_held(&q->blk_trace_mutex))) { + __blk_trace_startstop(q, 0); + __blk_trace_remove(q); + } +@@ -802,10 +805,14 @@ static void blk_add_trace_rq(struct requ + unsigned int nr_bytes, u32 what, + union kernfs_node_id *cgid) + { +- struct blk_trace *bt = rq->q->blk_trace; ++ struct blk_trace *bt; + +- if (likely(!bt)) ++ rcu_read_lock(); ++ bt = rcu_dereference(rq->q->blk_trace); ++ if (likely(!bt)) { ++ rcu_read_unlock(); + return; ++ } + + if (blk_rq_is_passthrough(rq)) + what |= BLK_TC_ACT(BLK_TC_PC); +@@ -814,6 +821,7 @@ 
static void blk_add_trace_rq(struct requ + + __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), + rq->cmd_flags, what, error, 0, NULL, cgid); ++ rcu_read_unlock(); + } + + static void blk_add_trace_rq_insert(void *ignore, +@@ -859,13 +867,18 @@ static void blk_add_trace_rq_complete(vo + static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, + u32 what, int error, union kernfs_node_id *cgid) + { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + +- if (likely(!bt)) ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); ++ if (likely(!bt)) { ++ rcu_read_unlock(); + return; ++ } + + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, + bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid); ++ rcu_read_unlock(); + } + + static void blk_add_trace_bio_bounce(void *ignore, +@@ -916,11 +929,14 @@ static void blk_add_trace_getrq(void *ig + blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0, + blk_trace_bio_get_cgid(q, bio)); + else { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); + if (bt) + __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, + NULL, NULL); ++ rcu_read_unlock(); + } + } + +@@ -933,27 +949,35 @@ static void blk_add_trace_sleeprq(void * + blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0, + blk_trace_bio_get_cgid(q, bio)); + else { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); + if (bt) + __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, + 0, 0, NULL, NULL); ++ rcu_read_unlock(); + } + } + + static void blk_add_trace_plug(void *ignore, struct request_queue *q) + { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); + if (bt) + __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL); ++ rcu_read_unlock(); + } + + static void blk_add_trace_unplug(void *ignore, struct 
request_queue *q, + unsigned int depth, bool explicit) + { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); + if (bt) { + __be64 rpdu = cpu_to_be64(depth); + u32 what; +@@ -965,14 +989,17 @@ static void blk_add_trace_unplug(void *i + + __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL); + } ++ rcu_read_unlock(); + } + + static void blk_add_trace_split(void *ignore, + struct request_queue *q, struct bio *bio, + unsigned int pdu) + { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); + if (bt) { + __be64 rpdu = cpu_to_be64(pdu); + +@@ -981,6 +1008,7 @@ static void blk_add_trace_split(void *ig + BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), + &rpdu, blk_trace_bio_get_cgid(q, bio)); + } ++ rcu_read_unlock(); + } + + /** +@@ -1000,11 +1028,15 @@ static void blk_add_trace_bio_remap(void + struct request_queue *q, struct bio *bio, + dev_t dev, sector_t from) + { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + struct blk_io_trace_remap r; + +- if (likely(!bt)) ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); ++ if (likely(!bt)) { ++ rcu_read_unlock(); + return; ++ } + + r.device_from = cpu_to_be32(dev); + r.device_to = cpu_to_be32(bio_dev(bio)); +@@ -1013,6 +1045,7 @@ static void blk_add_trace_bio_remap(void + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, + bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, + sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); ++ rcu_read_unlock(); + } + + /** +@@ -1033,11 +1066,15 @@ static void blk_add_trace_rq_remap(void + struct request *rq, dev_t dev, + sector_t from) + { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + struct blk_io_trace_remap r; + +- if (likely(!bt)) ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); ++ if (likely(!bt)) { ++ rcu_read_unlock(); + return; ++ } + + r.device_from 
= cpu_to_be32(dev); + r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); +@@ -1046,6 +1083,7 @@ static void blk_add_trace_rq_remap(void + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), + rq_data_dir(rq), 0, BLK_TA_REMAP, 0, + sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); ++ rcu_read_unlock(); + } + + /** +@@ -1063,14 +1101,19 @@ void blk_add_driver_data(struct request_ + struct request *rq, + void *data, size_t len) + { +- struct blk_trace *bt = q->blk_trace; ++ struct blk_trace *bt; + +- if (likely(!bt)) ++ rcu_read_lock(); ++ bt = rcu_dereference(q->blk_trace); ++ if (likely(!bt)) { ++ rcu_read_unlock(); + return; ++ } + + __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, + BLK_TA_DRV_DATA, 0, len, data, + blk_trace_request_get_cgid(q, rq)); ++ rcu_read_unlock(); + } + EXPORT_SYMBOL_GPL(blk_add_driver_data); + +@@ -1761,6 +1804,7 @@ static ssize_t sysfs_blk_trace_attr_show + struct hd_struct *p = dev_to_part(dev); + struct request_queue *q; + struct block_device *bdev; ++ struct blk_trace *bt; + ssize_t ret = -ENXIO; + + bdev = bdget(part_devt(p)); +@@ -1773,21 +1817,23 @@ static ssize_t sysfs_blk_trace_attr_show + + mutex_lock(&q->blk_trace_mutex); + ++ bt = rcu_dereference_protected(q->blk_trace, ++ lockdep_is_held(&q->blk_trace_mutex)); + if (attr == &dev_attr_enable) { +- ret = sprintf(buf, "%u\n", !!q->blk_trace); ++ ret = sprintf(buf, "%u\n", !!bt); + goto out_unlock_bdev; + } + +- if (q->blk_trace == NULL) ++ if (bt == NULL) + ret = sprintf(buf, "disabled\n"); + else if (attr == &dev_attr_act_mask) +- ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); ++ ret = blk_trace_mask2str(buf, bt->act_mask); + else if (attr == &dev_attr_pid) +- ret = sprintf(buf, "%u\n", q->blk_trace->pid); ++ ret = sprintf(buf, "%u\n", bt->pid); + else if (attr == &dev_attr_start_lba) +- ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); ++ ret = sprintf(buf, "%llu\n", bt->start_lba); + else if (attr == &dev_attr_end_lba) +- ret = sprintf(buf, 
"%llu\n", q->blk_trace->end_lba); ++ ret = sprintf(buf, "%llu\n", bt->end_lba); + + out_unlock_bdev: + mutex_unlock(&q->blk_trace_mutex); +@@ -1804,6 +1850,7 @@ static ssize_t sysfs_blk_trace_attr_stor + struct block_device *bdev; + struct request_queue *q; + struct hd_struct *p; ++ struct blk_trace *bt; + u64 value; + ssize_t ret = -EINVAL; + +@@ -1834,8 +1881,10 @@ static ssize_t sysfs_blk_trace_attr_stor + + mutex_lock(&q->blk_trace_mutex); + ++ bt = rcu_dereference_protected(q->blk_trace, ++ lockdep_is_held(&q->blk_trace_mutex)); + if (attr == &dev_attr_enable) { +- if (!!value == !!q->blk_trace) { ++ if (!!value == !!bt) { + ret = 0; + goto out_unlock_bdev; + } +@@ -1847,18 +1896,18 @@ static ssize_t sysfs_blk_trace_attr_stor + } + + ret = 0; +- if (q->blk_trace == NULL) ++ if (bt == NULL) + ret = blk_trace_setup_queue(q, bdev); + + if (ret == 0) { + if (attr == &dev_attr_act_mask) +- q->blk_trace->act_mask = value; ++ bt->act_mask = value; + else if (attr == &dev_attr_pid) +- q->blk_trace->pid = value; ++ bt->pid = value; + else if (attr == &dev_attr_start_lba) +- q->blk_trace->start_lba = value; ++ bt->start_lba = value; + else if (attr == &dev_attr_end_lba) +- q->blk_trace->end_lba = value; ++ bt->end_lba = value; + } + + out_unlock_bdev: diff --git a/patches.suse/blktrace-fix-dereference-after-null-check.patch b/patches.suse/blktrace-fix-dereference-after-null-check.patch new file mode 100644 index 0000000..e61f77b --- /dev/null +++ b/patches.suse/blktrace-fix-dereference-after-null-check.patch @@ -0,0 +1,70 @@ +From 153031a301bb07194e9c37466cfce8eacb977621 Mon Sep 17 00:00:00 2001 +From: Cengiz Can +Date: Wed, 4 Mar 2020 13:58:19 +0300 +Subject: [PATCH] blktrace: fix dereference after null check +Git-commit: 153031a301bb07194e9c37466cfce8eacb977621 +Patch-mainline: v5.6-rc5 +References: bsc#1159285 + +There was a recent change in blktrace.c that added a RCU protection to +`q->blk_trace` in order to fix a use-after-free issue during access. 
+ +However the change missed an edge case that can lead to dereferencing of +`bt` pointer even when it's NULL: + +Coverity static analyzer marked this as a FORWARD_NULL issue with CID +1460458. + +``` +/kernel/trace/blktrace.c: 1904 in sysfs_blk_trace_attr_store() +1898 ret = 0; +1899 if (bt == NULL) +1900 ret = blk_trace_setup_queue(q, bdev); +1901 +1902 if (ret == 0) { +1903 if (attr == &dev_attr_act_mask) +>>> CID 1460458: Null pointer dereferences (FORWARD_NULL) +>>> Dereferencing null pointer "bt". +1904 bt->act_mask = value; +1905 else if (attr == &dev_attr_pid) +1906 bt->pid = value; +1907 else if (attr == &dev_attr_start_lba) +1908 bt->start_lba = value; +1909 else if (attr == &dev_attr_end_lba) +``` + +Added a reassignment with RCU annotation to fix the issue. + +Fixes: c780e86dd48 ("blktrace: Protect q->blk_trace with RCU") +Cc: stable@vger.kernel.org +Reviewed-by: Ming Lei +Reviewed-by: Bob Liu +Reviewed-by: Steven Rostedt (VMware) +Signed-off-by: Cengiz Can +Signed-off-by: Jens Axboe +Acked-by: Jan Kara + +--- + kernel/trace/blktrace.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index 4560878f0bac..ca39dc3230cb 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -1896,8 +1896,11 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, + } + + ret = 0; +- if (bt == NULL) ++ if (bt == NULL) { + ret = blk_trace_setup_queue(q, bdev); ++ bt = rcu_dereference_protected(q->blk_trace, ++ lockdep_is_held(&q->blk_trace_mutex)); ++ } + + if (ret == 0) { + if (attr == &dev_attr_act_mask) +-- +2.16.4 + diff --git a/patches.suse/blktrace-fix-trace-mutex-deadlock.patch b/patches.suse/blktrace-fix-trace-mutex-deadlock.patch new file mode 100644 index 0000000..4b9f9ac --- /dev/null +++ b/patches.suse/blktrace-fix-trace-mutex-deadlock.patch @@ -0,0 +1,46 @@ +From 2967acbb257a6a9bf912f4778b727e00972eac9b Mon Sep 17 00:00:00 2001 +From: Jens Axboe +Date: Sun, 19 
Nov 2017 11:52:55 -0700 +Subject: [PATCH] blktrace: fix trace mutex deadlock +Git-commit: 2967acbb257a6a9bf912f4778b727e00972eac9b +Patch-mainline: v4.15-rc2 +References: bsc#1159285 + +A previous commit changed the locking around registration/cleanup, +but direct callers of blk_trace_remove() were missed. This means +that if we hit the error path in setup, we will deadlock on +attempting to re-acquire the queue trace mutex. + +Fixes: 1f2cac107c59 ("blktrace: fix unlocked access to init/start-stop/teardown") +Signed-off-by: Jens Axboe +Acked-by: Jan Kara + +--- + kernel/trace/blktrace.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index c5987d4c5f23..987d9a9ae283 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -591,7 +591,7 @@ static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + return ret; + + if (copy_to_user(arg, &buts, sizeof(buts))) { +- blk_trace_remove(q); ++ __blk_trace_remove(q); + return -EFAULT; + } + return 0; +@@ -637,7 +637,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, + return ret; + + if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { +- blk_trace_remove(q); ++ __blk_trace_remove(q); + return -EFAULT; + } + +-- +2.16.4 + diff --git a/patches.suse/blktrace-fix-unlocked-access-to-init-start-stop-tear.patch b/patches.suse/blktrace-fix-unlocked-access-to-init-start-stop-tear.patch new file mode 100644 index 0000000..bf8e8e6 --- /dev/null +++ b/patches.suse/blktrace-fix-unlocked-access-to-init-start-stop-tear.patch @@ -0,0 +1,150 @@ +From 1f2cac107c591c24b60b115d6050adc213d10fc0 Mon Sep 17 00:00:00 2001 +From: Jens Axboe +Date: Sun, 5 Nov 2017 09:13:48 -0700 +Subject: [PATCH] blktrace: fix unlocked access to init/start-stop/teardown +Git-commit: 1f2cac107c591c24b60b115d6050adc213d10fc0 +Patch-mainline: v4.15-rc1 +References: bsc#1159285 CVE-2019-19768 + +sg.c calls into the blktrace 
functions without holding the proper queue +mutex for doing setup, start/stop, or teardown. + +Add internal unlocked variants, and export the ones that do the proper +locking. + +Fixes: 6da127ad0918 ("blktrace: Add blktrace ioctls to SCSI generic devices") +Tested-by: Dmitry Vyukov +Signed-off-by: Jens Axboe +Acked-by: Jan Kara + +--- + kernel/trace/blktrace.c | 58 +++++++++++++++++++++++++++++++++++++++--------- + 1 file changed, 48 insertions(+), 10 deletions(-) + +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -312,7 +312,7 @@ static void blk_trace_cleanup(struct blk + blk_unregister_tracepoints(); + } + +-int blk_trace_remove(struct request_queue *q) ++static int __blk_trace_remove(struct request_queue *q) + { + struct blk_trace *bt; + +@@ -325,6 +325,17 @@ int blk_trace_remove(struct request_queu + + return 0; + } ++ ++int blk_trace_remove(struct request_queue *q) ++{ ++ int ret; ++ ++ mutex_lock(&q->blk_trace_mutex); ++ ret = __blk_trace_remove(q); ++ mutex_unlock(&q->blk_trace_mutex); ++ ++ return ret; ++} + EXPORT_SYMBOL_GPL(blk_trace_remove); + + static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, +@@ -526,9 +537,8 @@ err: + return ret; + } + +-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, +- struct block_device *bdev, +- char __user *arg) ++static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, ++ struct block_device *bdev, char __user *arg) + { + struct blk_user_trace_setup buts; + int ret; +@@ -547,6 +557,19 @@ int blk_trace_setup(struct request_queue + } + return 0; + } ++ ++int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, ++ struct block_device *bdev, ++ char __user *arg) ++{ ++ int ret; ++ ++ mutex_lock(&q->blk_trace_mutex); ++ ret = __blk_trace_setup(q, name, dev, bdev, arg); ++ mutex_unlock(&q->blk_trace_mutex); ++ ++ return ret; ++} + EXPORT_SYMBOL_GPL(blk_trace_setup); + + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +@@ -583,7 +606,7 @@ 
static int compat_blk_trace_setup(struct + } + #endif + +-int blk_trace_startstop(struct request_queue *q, int start) ++static int __blk_trace_startstop(struct request_queue *q, int start) + { + int ret; + struct blk_trace *bt = q->blk_trace; +@@ -622,6 +645,17 @@ int blk_trace_startstop(struct request_q + + return ret; + } ++ ++int blk_trace_startstop(struct request_queue *q, int start) ++{ ++ int ret; ++ ++ mutex_lock(&q->blk_trace_mutex); ++ ret = __blk_trace_startstop(q, start); ++ mutex_unlock(&q->blk_trace_mutex); ++ ++ return ret; ++} + EXPORT_SYMBOL_GPL(blk_trace_startstop); + + /* +@@ -652,7 +686,7 @@ int blk_trace_ioctl(struct block_device + switch (cmd) { + case BLKTRACESETUP: + bdevname(bdev, b); +- ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); ++ ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); + break; + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) + case BLKTRACESETUP32: +@@ -663,10 +697,10 @@ int blk_trace_ioctl(struct block_device + case BLKTRACESTART: + start = 1; + case BLKTRACESTOP: +- ret = blk_trace_startstop(q, start); ++ ret = __blk_trace_startstop(q, start); + break; + case BLKTRACETEARDOWN: +- ret = blk_trace_remove(q); ++ ret = __blk_trace_remove(q); + break; + default: + ret = -ENOTTY; +@@ -684,10 +718,14 @@ int blk_trace_ioctl(struct block_device + **/ + void blk_trace_shutdown(struct request_queue *q) + { ++ mutex_lock(&q->blk_trace_mutex); ++ + if (q->blk_trace) { +- blk_trace_startstop(q, 0); +- blk_trace_remove(q); ++ __blk_trace_startstop(q, 0); ++ __blk_trace_remove(q); + } ++ ++ mutex_unlock(&q->blk_trace_mutex); + } + + #ifdef CONFIG_BLK_CGROUP diff --git a/patches.suse/bonding-alb-properly-access-headers-in-bond_alb_xmit.patch b/patches.suse/bonding-alb-properly-access-headers-in-bond_alb_xmit.patch new file mode 100644 index 0000000..d21644a --- /dev/null +++ b/patches.suse/bonding-alb-properly-access-headers-in-bond_alb_xmit.patch @@ -0,0 +1,174 @@ +From: Eric Dumazet +Date: Tue, 4 Feb 2020 19:26:05 
-0800 +Subject: bonding/alb: properly access headers in bond_alb_xmit() +Git-commit: 38f88c45404293bbc027b956def6c10cbd45c616 +Patch-mainline: 5.6-rc1 +References: networking-stable-20_02_09 + +syzbot managed to send an IPX packet through bond_alb_xmit() +and af_packet and triggered a use-after-free. + +First, bond_alb_xmit() was using ipx_hdr() helper to reach +the IPX header, but ipx_hdr() was using the transport offset +instead of the network offset. In the particular syzbot +report transport offset was 0xFFFF + +This patch removes ipx_hdr() since it was only (mis)used from bonding. + +Then we need to make sure IPv4/IPv6/IPX headers are pulled +in skb->head before dereferencing anything. + +BUG: KASAN: use-after-free in bond_alb_xmit+0x153a/0x1590 drivers/net/bonding/bond_alb.c:1452 +Read of size 2 at addr ffff8801ce56dfff by task syz-executor.2/18108 + (if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) ...) + +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 +Call Trace: + [] __dump_stack lib/dump_stack.c:17 [inline] + [] dump_stack+0x14d/0x20b lib/dump_stack.c:53 + [] print_address_description+0x6f/0x20b mm/kasan/report.c:282 + [] kasan_report_error mm/kasan/report.c:380 [inline] + [] kasan_report mm/kasan/report.c:438 [inline] + [] kasan_report.cold+0x8c/0x2a0 mm/kasan/report.c:422 + [] __asan_report_load_n_noabort+0xf/0x20 mm/kasan/report.c:469 + [] bond_alb_xmit+0x153a/0x1590 drivers/net/bonding/bond_alb.c:1452 + [] __bond_start_xmit drivers/net/bonding/bond_main.c:4199 [inline] + [] bond_start_xmit+0x4f4/0x1570 drivers/net/bonding/bond_main.c:4224 + [] __netdev_start_xmit include/linux/netdevice.h:4525 [inline] + [] netdev_start_xmit include/linux/netdevice.h:4539 [inline] + [] xmit_one net/core/dev.c:3611 [inline] + [] dev_hard_start_xmit+0x168/0x910 net/core/dev.c:3627 + [] __dev_queue_xmit+0x1f55/0x33b0 net/core/dev.c:4238 + [] dev_queue_xmit+0x18/0x20 net/core/dev.c:4278 + [] packet_snd 
net/packet/af_packet.c:3226 [inline] + [] packet_sendmsg+0x4919/0x70b0 net/packet/af_packet.c:3252 + [] sock_sendmsg_nosec net/socket.c:673 [inline] + [] sock_sendmsg+0x12c/0x160 net/socket.c:684 + [] __sys_sendto+0x262/0x380 net/socket.c:1996 + [] SYSC_sendto net/socket.c:2008 [inline] + [] SyS_sendto+0x40/0x60 net/socket.c:2004 + +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Signed-off-by: Eric Dumazet +Reported-by: syzbot +Cc: Jay Vosburgh +Cc: Veaceslav Falico +Cc: Andy Gospodarek +Signed-off-by: David S. Miller +Signed-off-by: Jiri Slaby +--- + drivers/net/bonding/bond_alb.c | 44 +++++++++++++++++++++++++++++------------ + include/net/ipx.h | 5 ---- + 2 files changed, 32 insertions(+), 17 deletions(-) + +--- a/drivers/net/bonding/bond_alb.c ++++ b/drivers/net/bonding/bond_alb.c +@@ -1404,26 +1404,31 @@ int bond_alb_xmit(struct sk_buff *skb, s + bool do_tx_balance = true; + u32 hash_index = 0; + const u8 *hash_start = NULL; +- struct ipv6hdr *ip6hdr; + + skb_reset_mac_header(skb); + eth_data = eth_hdr(skb); + + switch (ntohs(skb->protocol)) { + case ETH_P_IP: { +- const struct iphdr *iph = ip_hdr(skb); ++ const struct iphdr *iph; + + if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) || +- (iph->daddr == ip_bcast) || +- (iph->protocol == IPPROTO_IGMP)) { ++ !pskb_network_may_pull(skb, sizeof(*iph))) { ++ do_tx_balance = false; ++ break; ++ } ++ iph = ip_hdr(skb); ++ if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) { + do_tx_balance = false; + break; + } + hash_start = (char *)&(iph->daddr); + hash_size = sizeof(iph->daddr); +- } + break; +- case ETH_P_IPV6: ++ } ++ case ETH_P_IPV6: { ++ const struct ipv6hdr *ip6hdr; ++ + /* IPv6 doesn't really use broadcast mac address, but leave + * that here just in case. 
+ */ +@@ -1440,7 +1445,11 @@ int bond_alb_xmit(struct sk_buff *skb, s + break; + } + +- /* Additianally, DAD probes should not be tx-balanced as that ++ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) { ++ do_tx_balance = false; ++ break; ++ } ++ /* Additionally, DAD probes should not be tx-balanced as that + * will lead to false positives for duplicate addresses and + * prevent address configuration from working. + */ +@@ -1450,17 +1459,26 @@ int bond_alb_xmit(struct sk_buff *skb, s + break; + } + +- hash_start = (char *)&(ipv6_hdr(skb)->daddr); +- hash_size = sizeof(ipv6_hdr(skb)->daddr); ++ hash_start = (char *)&ip6hdr->daddr; ++ hash_size = sizeof(ip6hdr->daddr); + break; +- case ETH_P_IPX: +- if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { ++ } ++ case ETH_P_IPX: { ++ const struct ipxhdr *ipxhdr; ++ ++ if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) { ++ do_tx_balance = false; ++ break; ++ } ++ ipxhdr = (struct ipxhdr *)skb_network_header(skb); ++ ++ if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) { + /* something is wrong with this packet */ + do_tx_balance = false; + break; + } + +- if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) { ++ if (ipxhdr->ipx_type != IPX_TYPE_NCP) { + /* The only protocol worth balancing in + * this family since it has an "ARP" like + * mechanism +@@ -1469,9 +1487,11 @@ int bond_alb_xmit(struct sk_buff *skb, s + break; + } + ++ eth_data = eth_hdr(skb); + hash_start = (char *)eth_data->h_dest; + hash_size = ETH_ALEN; + break; ++ } + case ETH_P_ARP: + do_tx_balance = false; + if (bond_info->rlb_enabled) +--- a/include/net/ipx.h ++++ b/include/net/ipx.h +@@ -45,11 +45,6 @@ struct ipxhdr { + /* From af_ipx.c */ + extern int sysctl_ipx_pprop_broadcasting; + +-static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) +-{ +- return (struct ipxhdr *)skb_transport_header(skb); +-} +- + struct ipx_interface { + /* IPX address */ + __be32 if_netnum; diff --git a/patches.suse/cpufreq-powernv-Fix-unsafe-notifiers.patch 
b/patches.suse/cpufreq-powernv-Fix-unsafe-notifiers.patch new file mode 100644 index 0000000..340a664 --- /dev/null +++ b/patches.suse/cpufreq-powernv-Fix-unsafe-notifiers.patch @@ -0,0 +1,67 @@ +From 966c08de7c2c9bcac13e2cb9e769a39582d5389f Mon Sep 17 00:00:00 2001 +From: Oliver O'Halloran +Date: Thu, 6 Feb 2020 17:26:22 +1100 +Subject: [PATCH] cpufreq: powernv: Fix unsafe notifiers + +References: bsc#1065729 +Patch-mainline: queued +Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git +Git-commit: 966c08de7c2c9bcac13e2cb9e769a39582d5389f + +The PowerNV cpufreq driver registers two notifiers: one to catch +throttle messages from the OCC and one to bump the CPU frequency back +to normal before a reboot. Both require the cpufreq driver to be +registered in order to function since the notifier callbacks use +various cpufreq_*() functions. + +Right now we register both notifiers before we've initialised the +driver. This seems to work, but we should head off any potential +problems by registering the notifiers after the driver is initialised. + +Signed-off-by: Oliver O'Halloran +Reviewed-by: Gautham R.
Shenoy +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20200206062622.28235-2-oohall@gmail.com +Acked-by: Michal Suchanek +--- + drivers/cpufreq/powernv-cpufreq.c | 11 +++++------ + 1 file changed, 5 insertions(+), 6 deletions(-) + +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c +index 1806b1da4366..03798c4326c6 100644 +--- a/drivers/cpufreq/powernv-cpufreq.c ++++ b/drivers/cpufreq/powernv-cpufreq.c +@@ -1114,9 +1114,6 @@ static int __init powernv_cpufreq_init(void) + if (rc) + goto out; + +- register_reboot_notifier(&powernv_cpufreq_reboot_nb); +- opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); +- + if (powernv_pstate_info.wof_enabled) + powernv_cpufreq_driver.boost_enabled = true; + else +@@ -1125,15 +1122,17 @@ static int __init powernv_cpufreq_init(void) + rc = cpufreq_register_driver(&powernv_cpufreq_driver); + if (rc) { + pr_info("Failed to register the cpufreq driver (%d)\n", rc); +- goto cleanup_notifiers; ++ goto cleanup; + } + + if (powernv_pstate_info.wof_enabled) + cpufreq_enable_boost_support(); + ++ register_reboot_notifier(&powernv_cpufreq_reboot_nb); ++ opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); ++ + return 0; +-cleanup_notifiers: +- unregister_all_notifiers(); ++cleanup: + clean_chip_info(); + out: + pr_info("Platform driver disabled. 
System does not support PState control\n"); +-- +2.23.0 + diff --git a/patches.suse/cpufreq-powernv-Fix-use-after-free.patch b/patches.suse/cpufreq-powernv-Fix-use-after-free.patch new file mode 100644 index 0000000..2b2158b --- /dev/null +++ b/patches.suse/cpufreq-powernv-Fix-use-after-free.patch @@ -0,0 +1,51 @@ +From d0a72efac89d1c35ac55197895201b7b94c5e6ef Mon Sep 17 00:00:00 2001 +From: Oliver O'Halloran +Date: Thu, 6 Feb 2020 17:26:21 +1100 +Subject: [PATCH] cpufreq: powernv: Fix use-after-free + +References: bsc#1065729 +Patch-mainline: queued +Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git +Git-commit: d0a72efac89d1c35ac55197895201b7b94c5e6ef + +The cpufreq driver has a use-after-free that we can hit if: + +a) There's an OCC message pending when the notifier is registered, and +b) The cpufreq driver fails to register with the core. + +When a) occurs the notifier schedules a workqueue item to handle the +message. The backing work_struct is located on chips[].throttle and +when b) happens we clean up by freeing the array. Once we get to +the (now free) queued item and the kernel crashes. + +Fixes: c5e29ea7ac14 ("cpufreq: powernv: Fix bugs in powernv_cpufreq_{init/exit}") +Cc: stable@vger.kernel.org # v4.6+ +Signed-off-by: Oliver O'Halloran +Reviewed-by: Gautham R. 
Shenoy +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20200206062622.28235-1-oohall@gmail.com +Acked-by: Michal Suchanek +--- + drivers/cpufreq/powernv-cpufreq.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c +index 56f4bc0d209e..1806b1da4366 100644 +--- a/drivers/cpufreq/powernv-cpufreq.c ++++ b/drivers/cpufreq/powernv-cpufreq.c +@@ -1080,6 +1080,12 @@ static int init_chip_info(void) + + static inline void clean_chip_info(void) + { ++ int i; ++ ++ /* flush any pending work items */ ++ if (chips) ++ for (i = 0; i < nr_chips; i++) ++ cancel_work_sync(&chips[i].throttle); + kfree(chips); + } + +-- +2.23.0 + diff --git a/patches.suse/net-macb-Limit-maximum-GEM-TX-length-in-TSO.patch b/patches.suse/net-macb-Limit-maximum-GEM-TX-length-in-TSO.patch new file mode 100644 index 0000000..0696473 --- /dev/null +++ b/patches.suse/net-macb-Limit-maximum-GEM-TX-length-in-TSO.patch @@ -0,0 +1,39 @@ +From: Harini Katakam +Date: Wed, 5 Feb 2020 18:08:12 +0530 +Subject: net: macb: Limit maximum GEM TX length in TSO +Git-commit: f822e9c4ffa511a5c681cf866287d9383a3b6f1b +Patch-mainline: 5.6-rc1 +References: networking-stable-20_02_09 + +GEM_MAX_TX_LEN currently resolves to 0x3FF8 for any IP version supporting +TSO with full 14bits of length field in payload descriptor. But an IP +errata causes false amba_error (bit 6 of ISR) when length in payload +descriptors is specified above 16387. The error occurs because the DMA +falsely concludes that there is not enough space in SRAM for incoming +payload. These errors were observed continuously under stress of large +packets using iperf on a version where SRAM was 16K for each queue. This +errata will be documented shortly and affects all versions since TSO +functionality was added. Hence limit the max length to 0x3FC0 (rounded). + +Signed-off-by: Harini Katakam +Signed-off-by: David S. 
Miller +Signed-off-by: Jiri Slaby +--- + drivers/net/ethernet/cadence/macb.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/net/ethernet/cadence/macb.c ++++ b/drivers/net/ethernet/cadence/macb.c +@@ -66,7 +66,11 @@ + /* Max length of transmit frame must be a multiple of 8 bytes */ + #define MACB_TX_LEN_ALIGN 8 + #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) +-#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) ++/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a ++ * false amba_error in TX path from the DMA assuming there is not enough ++ * space in the SRAM (16KB) even when there is. ++ */ ++#define GEM_MAX_TX_LEN (unsigned int)(0x3FC0) + + #define GEM_MTU_MIN_SIZE ETH_MIN_MTU + #define MACB_NETIF_LSO NETIF_F_TSO diff --git a/patches.suse/net-macb-Remove-unnecessary-alignment-check-for-TSO.patch b/patches.suse/net-macb-Remove-unnecessary-alignment-check-for-TSO.patch new file mode 100644 index 0000000..cff741f --- /dev/null +++ b/patches.suse/net-macb-Remove-unnecessary-alignment-check-for-TSO.patch @@ -0,0 +1,45 @@ +From: Harini Katakam +Date: Wed, 5 Feb 2020 18:08:11 +0530 +Subject: net: macb: Remove unnecessary alignment check for TSO +Git-commit: 41c1ef978c8d0259c6636e6d2d854777e92650eb +Patch-mainline: 5.6-rc1 +References: networking-stable-20_02_09 + +The IP TSO implementation does NOT require the length to be a +multiple of 8. That is only a requirement for UFO as per IP +documentation. Hence, exit macb_features_check function in the +beginning if the protocol is not UDP. Only when it is UDP, +proceed further to the alignment checks. Update comments to +reflect the same. Also remove dead code checking for protocol +TCP when calculating header length. + +Fixes: 1629dd4f763c ("cadence: Add LSO support.") +Signed-off-by: Harini Katakam +Signed-off-by: David S. 
Miller +Signed-off-by: Jiri Slaby +--- + drivers/net/ethernet/cadence/macb.c | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +--- a/drivers/net/ethernet/cadence/macb.c ++++ b/drivers/net/ethernet/cadence/macb.c +@@ -1489,16 +1489,14 @@ static netdev_features_t macb_features_c + + /* Validate LSO compatibility */ + +- /* there is only one buffer */ +- if (!skb_is_nonlinear(skb)) ++ /* there is only one buffer or protocol is not UDP */ ++ if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) + return features; + + /* length of header */ + hdrlen = skb_transport_offset(skb); +- if (ip_hdr(skb)->protocol == IPPROTO_TCP) +- hdrlen += tcp_hdrlen(skb); + +- /* For LSO: ++ /* For UFO only: + * When software supplies two or more payload buffers all payload buffers + * apart from the last must be a multiple of 8 bytes in size. + */ diff --git a/patches.suse/net-mvneta-move-rx_dropped-and-rx_errors-in-per-cpu-.patch b/patches.suse/net-mvneta-move-rx_dropped-and-rx_errors-in-per-cpu-.patch new file mode 100644 index 0000000..3338caa --- /dev/null +++ b/patches.suse/net-mvneta-move-rx_dropped-and-rx_errors-in-per-cpu-.patch @@ -0,0 +1,95 @@ +From: Lorenzo Bianconi +Date: Thu, 6 Feb 2020 10:14:39 +0100 +Subject: net: mvneta: move rx_dropped and rx_errors in per-cpu stats +Git-commit: c35947b8ff8acca33134ee39c31708233765c31a +Patch-mainline: 5.6-rc1 +References: networking-stable-20_02_09 + +Move rx_dropped and rx_errors counters in mvneta_pcpu_stats in order to +avoid possible races updating statistics + +Fixes: 562e2f467e71 ("net: mvneta: Improve the buffer allocation method for SWBM") +Fixes: dc35a10f68d3 ("net: mvneta: bm: add support for hardware buffer management") +Fixes: c5aff18204da ("net: mvneta: driver for Marvell Armada 370/XP network unit") +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. 
Miller +Signed-off-by: Jiri Slaby +--- + drivers/net/ethernet/marvell/mvneta.c | 19 ++++++++++++++----- + 1 file changed, 14 insertions(+), 5 deletions(-) + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -361,6 +361,8 @@ struct mvneta_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; ++ u64 rx_dropped; ++ u64 rx_errors; + u64 tx_packets; + u64 tx_bytes; + }; +@@ -665,6 +667,8 @@ mvneta_get_stats64(struct net_device *de + struct mvneta_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; ++ u64 rx_dropped; ++ u64 rx_errors; + u64 tx_packets; + u64 tx_bytes; + +@@ -673,19 +677,20 @@ mvneta_get_stats64(struct net_device *de + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; ++ rx_dropped = cpu_stats->rx_dropped; ++ rx_errors = cpu_stats->rx_errors; + tx_packets = cpu_stats->tx_packets; + tx_bytes = cpu_stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; ++ stats->rx_dropped += rx_dropped; ++ stats->rx_errors += rx_errors; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + +- stats->rx_errors = dev->stats.rx_errors; +- stats->rx_dropped = dev->stats.rx_dropped; +- + stats->tx_dropped = dev->stats.tx_dropped; + } + +@@ -1704,8 +1709,14 @@ static u32 mvneta_txq_desc_csum(int l3_o + static void mvneta_rx_error(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc) + { ++ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + u32 status = rx_desc->status; + ++ /* update per-cpu counter */ ++ u64_stats_update_begin(&stats->syncp); ++ stats->rx_errors++; ++ u64_stats_update_end(&stats->syncp); ++ + if (!mvneta_rxq_desc_is_first_last(status)) { + netdev_err(pp->dev, + "bad rx status %08x (buffer oversize), size=%d\n", +@@ -1960,7 +1971,6 @@ static int mvneta_rx_swbm(struct napi_st + (rx_status & 
MVNETA_RXD_ERR_SUMMARY)) { + mvneta_rx_error(pp, rx_desc); + err_drop_frame: +- dev->stats.rx_errors++; + /* leave the descriptor untouched */ + continue; + } +@@ -2085,7 +2095,6 @@ err_drop_frame_ret_pool: + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); + err_drop_frame: +- dev->stats.rx_errors++; + mvneta_rx_error(pp, rx_desc); + /* leave the descriptor untouched */ + continue; diff --git a/patches.suse/net-systemport-Avoid-RBUF-stuck-in-Wake-on-LAN-mode.patch b/patches.suse/net-systemport-Avoid-RBUF-stuck-in-Wake-on-LAN-mode.patch new file mode 100644 index 0000000..2dca84e --- /dev/null +++ b/patches.suse/net-systemport-Avoid-RBUF-stuck-in-Wake-on-LAN-mode.patch @@ -0,0 +1,35 @@ +From: Florian Fainelli +Date: Wed, 5 Feb 2020 12:32:04 -0800 +Subject: net: systemport: Avoid RBUF stuck in Wake-on-LAN mode +Git-commit: 263a425a482fc495d6d3f9a29b9103a664c38b69 +Patch-mainline: 5.6-rc1 +References: networking-stable-20_02_09 + +After a number of suspend and resume cycles, it is possible for the RBUF +to be stuck in Wake-on-LAN mode, despite the MPD enable bit being +cleared which instructed the RBUF to exit that mode. + +Avoid creating that problematic condition by clearing the RX_EN and +TX_EN bits in the UniMAC prior to disable the Magic Packet Detector +logic which is guaranteed to make the RBUF exit Wake-on-LAN mode. + +Fixes: 83e82f4c706b ("net: systemport: add Wake-on-LAN support") +Signed-off-by: Florian Fainelli +Signed-off-by: David S. 
Miller +Signed-off-by: Jiri Slaby +--- + drivers/net/ethernet/broadcom/bcmsysport.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/net/ethernet/broadcom/bcmsysport.c ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c +@@ -2246,6 +2246,9 @@ static int bcm_sysport_resume(struct dev + + umac_reset(priv); + ++ /* Disable the UniMAC RX/TX */ ++ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); ++ + /* We may have been suspended and never received a WOL event that + * would turn off MPD detection, take care of that now + */ diff --git a/patches.suse/net_sched-fix-a-resource-leak-in-tcindex_set_parms.patch b/patches.suse/net_sched-fix-a-resource-leak-in-tcindex_set_parms.patch new file mode 100644 index 0000000..104628b --- /dev/null +++ b/patches.suse/net_sched-fix-a-resource-leak-in-tcindex_set_parms.patch @@ -0,0 +1,45 @@ +From: Cong Wang +Date: Tue, 4 Feb 2020 11:10:12 -0800 +Subject: net_sched: fix a resource leak in tcindex_set_parms() +Git-commit: 52b5ae501c045010aeeb1d5ac0373ff161a88291 +Patch-mainline: 5.6-rc1 +References: networking-stable-20_02_09 + +Jakub noticed there is a potential resource leak in +tcindex_set_parms(): when tcindex_filter_result_init() fails +and it jumps to 'errout1' which doesn't release the memory +and resources allocated by tcindex_alloc_perfect_hash(). + +We should just jump to 'errout_alloc' which calls +tcindex_free_perfect_hash(). + +Fixes: b9a24bb76bf6 ("net_sched: properly handle failure case of tcf_exts_init()") +Reported-by: Jakub Kicinski +Cc: Jamal Hadi Salim +Cc: Jiri Pirko +Signed-off-by: Cong Wang +Signed-off-by: David S. 
Miller +Signed-off-by: Jiri Slaby +--- + net/sched/cls_tcindex.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/net/sched/cls_tcindex.c ++++ b/net/sched/cls_tcindex.c +@@ -337,7 +337,7 @@ tcindex_set_parms(struct net *net, struc + + err = tcindex_filter_result_init(&new_filter_result); + if (err < 0) +- goto errout1; ++ goto errout_alloc; + if (old_r) + cr = r->res; + +@@ -456,7 +456,6 @@ errout_alloc: + tcindex_free_perfect_hash(cp); + else if (balloc == 2) + kfree(cp->h); +-errout1: + tcf_exts_destroy(&new_filter_result.exts); + errout: + kfree(cp); diff --git a/patches.suse/powerpc-fix-hardware-PMU-exception-bug-on-PowerVM-co.patch b/patches.suse/powerpc-fix-hardware-PMU-exception-bug-on-PowerVM-co.patch new file mode 100644 index 0000000..38e4fdd --- /dev/null +++ b/patches.suse/powerpc-fix-hardware-PMU-exception-bug-on-PowerVM-co.patch @@ -0,0 +1,50 @@ +From fc37a1632d40c80c067eb1bc235139f5867a2667 Mon Sep 17 00:00:00 2001 +From: "Desnes A. Nunes do Rosario" +Date: Thu, 27 Feb 2020 10:47:15 -0300 +Subject: [PATCH] powerpc: fix hardware PMU exception bug on PowerVM + compatibility mode systems + +References: bsc#1056686 +Patch-mainline: v5.6-rc5 +Git-commit: fc37a1632d40c80c067eb1bc235139f5867a2667 + +PowerVM systems running compatibility mode on a few Power8 revisions are +still vulnerable to the hardware defect that loses PMU exceptions arriving +prior to a context switch. + +The software fix for this issue is enabled through the CPU_FTR_PMAO_BUG +cpu_feature bit, nevertheless this bit also needs to be set for PowerVM +compatibility mode systems. + +Fixes: 68f2f0d431d9ea4 ("powerpc: Add a cpu feature CPU_FTR_PMAO_BUG") +Signed-off-by: Desnes A. 
Nunes do Rosario +Reviewed-by: Leonardo Bras +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20200227134715.9715-1-desnesn@linux.ibm.com +Acked-by: Michal Suchanek +--- + arch/powerpc/kernel/cputable.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c +index e745abc5457a..245be4fafe13 100644 +--- a/arch/powerpc/kernel/cputable.c ++++ b/arch/powerpc/kernel/cputable.c +@@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, + * oprofile_cpu_type already has a value, then we are + * possibly overriding a real PVR with a logical one, + * and, in that case, keep the current value for +- * oprofile_cpu_type. ++ * oprofile_cpu_type. Futhermore, let's ensure that the ++ * fix for the PMAO bug is enabled on compatibility mode. + */ + if (old.oprofile_cpu_type != NULL) { + t->oprofile_cpu_type = old.oprofile_cpu_type; + t->oprofile_type = old.oprofile_type; ++ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG; + } + } + +-- +2.23.0 + diff --git a/patches.suse/tracing-blktrace-Fix-to-allow-setting-same-value.patch b/patches.suse/tracing-blktrace-Fix-to-allow-setting-same-value.patch index 7a2462c..2c6f171 100644 --- a/patches.suse/tracing-blktrace-Fix-to-allow-setting-same-value.patch +++ b/patches.suse/tracing-blktrace-Fix-to-allow-setting-same-value.patch @@ -49,7 +49,7 @@ diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b951aa1fac61..96457ad8d720 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c -@@ -1841,6 +1841,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, +@@ -1730,6 +1730,10 @@ static ssize_t sysfs_blk_trace_attr_stor mutex_lock(&q->blk_trace_mutex); if (attr == &dev_attr_enable) { diff --git a/series.conf b/series.conf index 223c6e9..0bdea43 100644 --- a/series.conf +++ b/series.conf @@ -12671,6 +12671,7 @@ 
patches.suse/blk-mq-don-t-allocate-driver-tag-upfront-for-flush-r.patch patches.suse/0001-nbd-wait-uninterruptible-for-the-dead-timeout.patch patches.suse/0001-nbd-don-t-start-req-until-after-the-dead-connection-.patch + patches.suse/blktrace-fix-unlocked-access-to-init-start-stop-tear.patch patches.suse/nvme-move-the-dying-queue-check-from-cancel-to-compl.patch patches.suse/nvme-always-unregister-the-integrity-profile-in-__nv.patch patches.suse/nvme-don-t-pass-struct-nvme_ns-to-nvme_init_integrit.patch @@ -18142,6 +18143,7 @@ patches.suse/0035-bcache-Fix-building-error-on-MIPS.patch patches.suse/0036-bcache-recover-data-from-backing-when-data-is-clean.patch patches.suse/0037-bcache-check-return-value-of-register_shrinker.patch + patches.suse/blktrace-fix-trace-mutex-deadlock.patch patches.suse/nvme-rdma-don-t-suppress-send-completions.patch patches.suse/nvme-rdma-don-t-complete-requests-before-a-send-work.patch patches.suse/nvme-rdma-wait-for-local-invalidation-before-complet.patch @@ -52294,7 +52296,9 @@ patches.suse/cifs-add-support-for-fallocate-mode-0-for-non-sparse-files.patch patches.suse/cifs-use-PTR_ERR_OR_ZERO-to-simplify-code.patch patches.suse/CIFS-Fix-task-struct-use-after-free-on-reconnect.patch + patches.suse/0001-crypto-pcrypt-Fix-user-after-free-on-module-unload.patch patches.suse/crypto-pcrypt-Do-not-clear-MAY_SLEEP-flag-in-origina.patch + patches.suse/0001-padata-always-acquire-cpu_hotplug_lock-before-pinst-.patch patches.suse/crypto-af_alg-Use-bh_lock_sock-in-sk_destruct.patch patches.suse/crypto-api-Check-spawn-alg-under-lock-in-crypto_drop.patch patches.suse/crypto-api-Fix-race-condition-in-crypto_spawn_alg.patch @@ -52535,7 +52539,13 @@ patches.suse/drm-amd-display-Retrain-dongles-when-SINK_COUNT-beco.patch patches.suse/0001-drm-amd-dm-mst-Ignore-payload-update-failures.patch patches.suse/soc-tegra-fuse-Correct-straps-address-for-older-Tegr.patch + patches.suse/net_sched-fix-a-resource-leak-in-tcindex_set_parms.patch 
patches.suse/devlink-report-0-after-hitting-end-in-region-read.patch + patches.suse/bonding-alb-properly-access-headers-in-bond_alb_xmit.patch + patches.suse/net-macb-Remove-unnecessary-alignment-check-for-TSO.patch + patches.suse/net-macb-Limit-maximum-GEM-TX-length-in-TSO.patch + patches.suse/net-mvneta-move-rx_dropped-and-rx_errors-in-per-cpu-.patch + patches.suse/net-systemport-Avoid-RBUF-stuck-in-Wake-on-LAN-mode.patch patches.suse/net-mlx5-IPsec-Fix-esp-modify-function-attribute.patch patches.suse/net-mlx5-IPsec-fix-memory-leak-at-mlx5_fpga_ipsec_de.patch patches.suse/mwifiex-fix-unbalanced-locking-in-mwifiex_process_co.patch @@ -52637,6 +52647,7 @@ patches.suse/drm-i915-gvt-fix-orphan-vgpu-dmabuf_objs-lifetime patches.suse/acpi-watchdog-allow-disabling-wdat-at-boot.patch patches.suse/acpi-watchdog-set-default-timeout-in-probe.patch + patches.suse/blktrace-Protect-q-blk_trace-with-RCU.patch patches.suse/kvm-vmx-check-descriptor-table-exits-on-instruction-emulation patches.suse/cifs-don-t-leak-EAGAIN-for-stat-during-reconnect.patch patches.suse/cifs-fix-potential-mismatch-of-UNC-paths.patch @@ -52659,7 +52670,9 @@ patches.suse/ASoC-topology-Fix-memleak-in-soc_tplg_link_elems_loa.patch patches.suse/ASoC-pcm512x-Fix-unbalanced-regulator-enable-call-in.patch patches.suse/ASoC-dapm-Correct-DAPM-handling-of-active-widgets-du.patch + patches.suse/powerpc-fix-hardware-PMU-exception-bug-on-PowerVM-co.patch patches.suse/s390-pci-fix-unexpected-write-combine-on-resource + patches.suse/blktrace-fix-dereference-after-null-check.patch patches.suse/vt-selection-push-console-lock-down.patch patches.suse/vt-selection-push-sel_lock-up.patch patches.suse/firmware-imx-scu-ensure-sequential-tx.patch @@ -52736,6 +52749,8 @@ # powerpc/linux next patches.suse/powerpc-drmem-avoid-NULL-pointer-dereference-when-dr.patch patches.suse/powerpc-smp-Use-nid-as-fallback-for-package_id.patch + patches.suse/cpufreq-powernv-Fix-use-after-free.patch + 
patches.suse/cpufreq-powernv-Fix-unsafe-notifiers.patch # dhowells/linux-fs keys-uefi patches.suse/0001-KEYS-Allow-unrestricted-boot-time-addition-of-keys-t.patch