diff --git a/patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch b/patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch
new file mode 100644
index 0000000..1993376
--- /dev/null
+++ b/patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch
@@ -0,0 +1,121 @@
+From: Eric Dumazet
+Date: Thu, 6 Jun 2019 09:38:47 -0700
+Subject: tcp: add tcp_min_snd_mss sysctl
+Patch-mainline: Not yet, embargo
+References: bsc#1137586 CVE-2019-11479
+
+Some TCP peers announce a very small MSS option in their SYN and/or
+SYN/ACK messages.
+
+This forces the stack to send packets with a very high network/cpu
+overhead.
+
+Linux has enforced a minimal value of 48. Since this value includes
+the size of TCP options, and since the options can consume up to 40
+bytes, each segment can include only 8 bytes of payload.
+
+In some cases, it can be useful to increase this minimum to a saner
+value.
+
+We still leave the default at 48 (TCP_MIN_SND_MSS) for compatibility
+reasons.
+
+Note that the TCP_MAXSEG socket option enforces a minimal value of
+TCP_MIN_MSS. David Miller increased this minimal value
+in commit c39508d6f118 ("tcp: Make TCP_MAXSEG minimum more correct.")
+from 64 to 88.
+
+We might in the future merge TCP_MIN_SND_MSS and TCP_MIN_MSS.
+
+Signed-off-by: Eric Dumazet
+Suggested-by: Jonathan Looney
+Cc: Neal Cardwell
+Cc: Yuchung Cheng
+Cc: Tyler Hicks
+Cc: Bruce Curtis
+Acked-by: Michal Kubecek
+
+---
+ Documentation/networking/ip-sysctl.txt | 8 ++++++++
+ include/net/netns/ipv4.h | 1 +
+ net/ipv4/sysctl_net_ipv4.c | 11 +++++++++++
+ net/ipv4/tcp_ipv4.c | 1 +
+ net/ipv4/tcp_output.c | 3 +--
+ 5 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -241,6 +241,14 @@ tcp_base_mss - INTEGER
+ Path MTU discovery (MTU probing). If MTU probing is enabled,
+ this is the initial MSS used by the connection.
+ 
++tcp_min_snd_mss - INTEGER
++ TCP SYN and SYNACK messages usually advertise an ADVMSS option,
++ as described in RFC 1122 and RFC 6691.
++ If this ADVMSS option is smaller than tcp_min_snd_mss,
++ it is silently capped to tcp_min_snd_mss.
++
++ Default : 48 (at least 8 bytes of payload per segment)
++
+ tcp_congestion_control - STRING
+ Set the congestion control algorithm to be used for new
+ connections. The algorithm "reno" is always available, but
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -105,6 +105,7 @@ struct netns_ipv4 {
+ #endif
+ int sysctl_tcp_mtu_probing;
+ int sysctl_tcp_base_mss;
++ int sysctl_tcp_min_snd_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -36,6 +36,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
+ static int ip_local_port_range_max[] = { 65535, 65535 };
+ static int tcp_adv_win_scale_min = -31;
+ static int tcp_adv_win_scale_max = 31;
++static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
++static int tcp_min_snd_mss_max = 65535;
+ static int ip_privileged_port_min;
+ static int ip_privileged_port_max = 65535;
+ static int ip_ttl_min = 1;
+@@ -938,6 +940,15 @@ static struct ctl_table ipv4_net_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
++ {
++ .procname = "tcp_min_snd_mss",
++ .data = &init_net.ipv4.sysctl_tcp_min_snd_mss,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &tcp_min_snd_mss_min,
++ .extra2 = &tcp_min_snd_mss_max,
++ },
+ {
+ .procname = "tcp_probe_threshold",
+ .data = &init_net.ipv4.sysctl_tcp_probe_threshold,
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2423,6 +2423,7 @@ static int __net_init tcp_sk_init(struct net *net)
+ net->ipv4.sysctl_tcp_ecn_fallback = 1;
+
+ net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
++ net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
+ net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
+ net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1446,8 +1446,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ mss_now -= icsk->icsk_ext_hdr_len;
+
+ /* Then reserve room for full set of TCP options and 8 bytes of data */
+- if (mss_now < TCP_MIN_SND_MSS)
+- mss_now = TCP_MIN_SND_MSS;
++ mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
+ return mss_now;
+ }
+
diff --git a/patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch b/patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch
new file mode 100644
index 0000000..1a82b48
--- /dev/null
+++ b/patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch
@@ -0,0 +1,34 @@
+From: Eric Dumazet
+Date: Sat, 8 Jun 2019 10:38:08 -0700
+Subject: tcp: enforce tcp_min_snd_mss in tcp_mtu_probing()
+Patch-mainline: Not yet, embargo
+References: bsc#1137586 CVE-2019-11479
+
+If MTU probing is enabled, tcp_mtu_probing() could very well end up
+with a too-small MSS.
+
+Use the new sysctl tcp_min_snd_mss to make sure the MSS search
+is performed in an acceptable range.
+
+Signed-off-by: Eric Dumazet
+Reported-by: Jonathan Lemon
+Cc: Jonathan Looney
+Cc: Neal Cardwell
+Cc: Yuchung Cheng
+Cc: Tyler Hicks
+Cc: Bruce Curtis
+Acked-by: Michal Kubecek
+---
+ net/ipv4/tcp_timer.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -140,6 +140,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+ mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
+ mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
+ mss = max(mss, 68 - tp->tcp_header_len);
++ mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
+ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+ tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ }
diff --git a/patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch b/patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch
new file mode 100644
index 0000000..2ac8272
--- /dev/null
+++ b/patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch
@@ -0,0 +1,50 @@
+From: Joao Martins
+Date: Mon, 10 Jun 2019 10:13:23 -0400
+Subject: tcp: fix fack_count accounting on tcp_shift_skb_data()
+Patch-mainline: Not yet, embargo
+References: CVE-2019-11477 bsc#1137586
+
+Since v4.15, i.e. commit 737ff314563 ("tcp: use sequence distance to
+detect reordering"), upstream has switched from packet-based FACK
+tracking to sequence-based tracking.
+
+v4.14 and older still have the old logic, so tcp_shift_skb_data()
+needs to retain its original behaviour and keep @fack_count in sync.
+In other words, we keep incrementing pcount by tcp_skb_pcount(skb)
+and later use it to update fack_count. To make this more explicit we
+track the pcount of the newly shifted skb in @next_pcount, which also
+lets us avoid repeated invocations of tcp_skb_pcount(skb).
+
+Reported-by: Alexey Kodanev
+Signed-off-by: Joao Martins
+Acked-by: Michal Kubecek
+---
+ net/ipv4/tcp_input.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1419,6 +1419,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *prev;
+ int mss;
++ int next_pcount;
+ int pcount = 0;
+ int len;
+ int in_sack;
+@@ -1535,9 +1536,11 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ goto out;
+
+ len = skb->len;
+- pcount = tcp_skb_pcount(skb);
+- if (tcp_skb_shift(prev, skb, pcount, len))
+- tcp_shifted_skb(sk, skb, state, pcount, len, mss, 0);
++ next_pcount = tcp_skb_pcount(skb);
++ if (tcp_skb_shift(prev, skb, next_pcount, len)) {
++ pcount += next_pcount;
++ tcp_shifted_skb(sk, skb, state, next_pcount, len, mss, 0);
++ }
+
+ out:
+ state->fack_count += pcount;
diff --git a/patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch b/patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch
new file mode 100644
index 0000000..51e93d8
--- /dev/null
+++ b/patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch
@@ -0,0 +1,152 @@
+From: Eric Dumazet
+Date: Thu, 6 Jun 2019 09:38:45 -0700
+Subject: tcp: limit payload size of sacked skbs
+Patch-mainline: Not yet, embargo
+References: bsc#1137586 CVE-2019-11477
+
+Jonathan Looney reported that TCP can trigger the following crash
+in tcp_shifted_skb():
+
+ BUG_ON(tcp_skb_pcount(skb) < pcount);
+
+This can happen if the remote peer has advertised the smallest
+MSS that Linux TCP accepts: 48.
+
+An skb can hold 17 fragments, and each fragment can hold 32KB
+on x86, or 64KB on PowerPC.
+
+This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
+can overflow.
+
+Note that tcp_sendmsg() builds skbs with less than 64KB
+of payload, so this problem needs SACK to be enabled.
+SACK blocks allow TCP to coalesce multiple skbs in the retransmit
+queue, thus filling the 17 fragments to maximal capacity.
+
+Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
+Signed-off-by: Eric Dumazet
+Reported-by: Jonathan Looney
+Acked-by: Neal Cardwell
+Reviewed-by: Tyler Hicks
+Cc: Yuchung Cheng
+Cc: Bruce Curtis
+Acked-by: Michal Kubecek
+
+---
+ include/linux/tcp.h | 4 ++++
+ include/net/tcp.h | 2 ++
+ net/ipv4/tcp.c | 1 +
+ net/ipv4/tcp_input.c | 26 ++++++++++++++++++++------
+ net/ipv4/tcp_output.c | 6 +++---
+ 5 files changed, 30 insertions(+), 9 deletions(-)
+
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -450,4 +450,8 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
+
+ return (user_mss && user_mss < mss) ? user_mss : mss;
+ }
++
++int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
++ int shiftlen);
++
+ #endif /* _LINUX_TCP_H */
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -53,6 +53,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
+
+ #define MAX_TCP_HEADER (128 + MAX_HEADER)
+ #define MAX_TCP_OPTION_SPACE 40
++#define TCP_MIN_SND_MSS 48
++#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
+
+ /*
+  * Never offer a window over 32767 without using window scaling. Some
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3297,6 +3297,7 @@ void __init tcp_init(void)
+ unsigned long limit;
+ unsigned int i;
+
++ BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
+ BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
+ FIELD_SIZEOF(struct sk_buff, cb));
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1326,7 +1326,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ TCP_SKB_CB(skb)->seq += shifted;
+
+ tcp_skb_pcount_add(prev, pcount);
+- BUG_ON(tcp_skb_pcount(skb) < pcount);
++ WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
+ tcp_skb_pcount_add(skb, -pcount);
+
+ /* When we're adding to gso_segs == 1, gso_size will be zero,
+@@ -1393,6 +1393,21 @@ static int skb_can_shift(const struct sk_buff *skb)
+ return !skb_headlen(skb) && skb_is_nonlinear(skb);
+ }
+
++int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
++ int pcount, int shiftlen)
++{
++ /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
++ * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
++ * to make sure not storing more than 65535 * 8 bytes per skb,
++ * even if current MSS is bigger.
++ */
++ if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
++ return 0;
++ if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
++ return 0;
++ return skb_shift(to, from, shiftlen);
++}
++
+ /* Try collapsing SACK blocks spanning across multiple skbs to a single
+ * skb.
+ */
+@@ -1501,7 +1516,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+ goto fallback;
+
+- if (!skb_shift(prev, skb, len))
++ if (!tcp_skb_shift(prev, skb, pcount, len))
+ goto fallback;
+ if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
+ goto out;
+@@ -1520,10 +1535,9 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ goto out;
+
+ len = skb->len;
+- if (skb_shift(prev, skb, len)) {
+- pcount += tcp_skb_pcount(skb);
+- tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+- }
++ pcount = tcp_skb_pcount(skb);
++ if (tcp_skb_shift(prev, skb, pcount, len))
++ tcp_shifted_skb(sk, skb, state, pcount, len, mss, 0);
+
+ out:
+ state->fack_count += pcount;
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1443,8 +1443,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ mss_now -= icsk->icsk_ext_hdr_len;
+
+ /* Then reserve room for full set of TCP options and 8 bytes of data */
+- if (mss_now < 48)
+- mss_now = 48;
++ if (mss_now < TCP_MIN_SND_MSS)
++ mss_now = TCP_MIN_SND_MSS;
+ return mss_now;
+ }
+
+@@ -2720,7 +2720,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
+ if (next_skb_size <= skb_availroom(skb))
+ skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
+ next_skb_size);
+- else if (!skb_shift(skb, next_skb, next_skb_size))
++ else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
+ return false;
+ }
+ tcp_highest_sack_replace(sk, next_skb, skb);
diff --git a/patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch b/patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch
new file mode 100644
index 0000000..46fa950
--- /dev/null
+++ b/patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch
@@ -0,0 +1,68 @@
+From: Eric Dumazet
+Date: Thu, 6 Jun 2019 09:38:46 -0700
+Subject: tcp: tcp_fragment() should apply sane memory limits
+Patch-mainline: Not yet, embargo
+References: bsc#1137586 CVE-2019-11478
+
+Jonathan Looney reported that a malicious peer can force a sender
+to fragment its retransmit queue into tiny skbs, inflating memory
+usage and/or overflowing 32bit counters.
+
+TCP allows an application to queue up to sk_sndbuf bytes,
+so we need to give some allowance for non-malicious splitting
+of the retransmit queue.
+
+A new SNMP counter is added to monitor how many times TCP
+did not allow an skb to be split because the allowance was exceeded.
+
+Note that this counter might increase in the case applications
+use the SO_SNDBUF socket option to lower sk_sndbuf.
+
+Signed-off-by: Eric Dumazet
+Reported-by: Jonathan Looney
+Acked-by: Neal Cardwell
+Acked-by: Yuchung Cheng
+Reviewed-by: Tyler Hicks
+Cc: Bruce Curtis
+Acked-by: Michal Kubecek
+
+---
+ include/uapi/linux/snmp.h | 1 +
+ net/ipv4/proc.c | 1 +
+ net/ipv4/tcp_output.c | 5 +++++
+ 3 files changed, 7 insertions(+)
+
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -275,6 +275,7 @@ enum
+ LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
+ LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
+ LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
++ LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
+ __LINUX_MIB_MAX
+ };
+
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -297,6 +297,7 @@ static const struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+ SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+ SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
++ SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
+ SNMP_MIB_SENTINEL
+ };
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1273,6 +1273,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ if (nsize < 0)
+ nsize = 0;
+
++ if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
++ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
++ return -ENOMEM;
++ }
++
+ if (skb_unclone(skb, gfp))
+ return -ENOMEM;
+
diff --git a/patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch b/patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch
new file mode 100644
index 0000000..781b664
--- /dev/null
+++ b/patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch
@@ -0,0 +1,52 @@
+From: Michal Kubecek
+Date: Fri, 7 Jun 2019 18:05:46 +0200
+Subject: kabi: drop LINUX_MIB_TCPWQUEUETOOBIG snmp counter
+Patch-mainline: Never, kabi workaround
+References: bsc#1137586 CVE-2019-11478
+
+patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch adds
+the LINUX_MIB_TCPWQUEUETOOBIG snmp attribute, which breaks kABI. As it is
+only a diagnostic aid and is not essential for the actual security fix,
+drop the snmp counter and leave only the check.
+ +Signed-off-by: Michal Kubecek +--- + include/uapi/linux/snmp.h | 1 - + net/ipv4/proc.c | 1 - + net/ipv4/tcp_output.c | 4 +--- + 3 files changed, 1 insertion(+), 5 deletions(-) + +--- a/include/uapi/linux/snmp.h ++++ b/include/uapi/linux/snmp.h +@@ -275,7 +275,6 @@ enum + LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */ + LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */ + LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */ +- LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */ + __LINUX_MIB_MAX + }; + +--- a/net/ipv4/proc.c ++++ b/net/ipv4/proc.c +@@ -297,7 +297,6 @@ static const struct snmp_mib snmp4_net_list[] = { + SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE), + SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL), + SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS), +- SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG), + SNMP_MIB_SENTINEL + }; + +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1273,10 +1273,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, + if (nsize < 0) + nsize = 0; + +- if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) { +- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); ++ if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) + return -ENOMEM; +- } + + if (skb_unclone(skb, gfp)) + return -ENOMEM; diff --git a/patches.kabi/kabi-handle-addition-of-ip6addrlbl_table-into-struct.patch b/patches.kabi/kabi-handle-addition-of-ip6addrlbl_table-into-struct.patch index e5fb626..f8db212 100644 --- a/patches.kabi/kabi-handle-addition-of-ip6addrlbl_table-into-struct.patch +++ b/patches.kabi/kabi-handle-addition-of-ip6addrlbl_table-into-struct.patch @@ -12,27 +12,25 @@ be allocated directly or embedded in other structure or array. Signed-off-by: Michal Kubecek --- - include/net/net_namespace.h | 7 +++++++ + include/net/net_namespace.h | 5 +++++ include/net/netns/ipv6.h | 5 ----- net/ipv6/addrlabel.c | 34 +++++++++++++++++----------------- - 3 files changed, 24 insertions(+), 22 deletions(-) + 3 files changed, 22 insertions(+), 22 deletions(-) --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h -@@ -149,6 +149,13 @@ struct net { - #endif +@@ -150,6 +150,11 @@ struct net { struct sock *diag_nlsk; atomic_t fnhe_genid; -+#ifndef __GENKSYMS__ + #ifndef __GENKSYMS__ + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; -+#endif + int sysctl_tcp_min_snd_mss; + #endif }; - - #include --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -86,11 +86,6 @@ struct netns_ipv6 { diff --git a/patches.kabi/kabi-handle-addition-of-uevent_sock-into-struct-net.patch b/patches.kabi/kabi-handle-addition-of-uevent_sock-into-struct-net.patch index 5081b74..6ca00c0 100644 --- a/patches.kabi/kabi-handle-addition-of-uevent_sock-into-struct-net.patch +++ b/patches.kabi/kabi-handle-addition-of-uevent_sock-into-struct-net.patch @@ -30,6 +30,6 @@ Signed-off-by: Michal Kubecek u32 seq; } ip6addrlbl_table; + struct uevent_sock *uevent_sock; /* uevent socket */ + int sysctl_tcp_min_snd_mss; #endif }; - diff --git a/patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch b/patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch new file mode 100644 index 0000000..3d7ec92 --- /dev/null +++ b/patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch @@ -0,0 +1,90 @@ +From: Michal Kubecek +Date: Sat, 8 Jun 2019 12:30:13 +0200 +Subject: kabi: move sysctl_tcp_min_snd_mss to preserve struct net layout +Patch-mainline: Never, kabi workaround +References: 
bsc#1137586 CVE-2019-11479
+
+Patch patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch adds a new member,
+sysctl_tcp_min_snd_mss, to struct netns_ipv4, which is embedded in struct
+net, so the patch changes its layout in an incompatible way. Move it to
+the end of struct net.
+
+This is safe as struct net is always allocated by an in-tree helper and is
+never embedded in another structure or used as an array element.
+
+Signed-off-by: Michal Kubecek
+---
+ include/net/net_namespace.h | 3 +++
+ include/net/netns/ipv4.h | 1 -
+ net/ipv4/sysctl_net_ipv4.c | 2 +-
+ net/ipv4/tcp_ipv4.c | 2 +-
+ net/ipv4/tcp_output.c | 2 +-
+ net/ipv4/tcp_timer.c | 2 +-
+ 6 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -149,6 +149,9 @@ struct net {
+ #endif
+ struct sock *diag_nlsk;
+ atomic_t fnhe_genid;
++#ifndef __GENKSYMS__
++ int sysctl_tcp_min_snd_mss;
++#endif
+ };
+
+ #include
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -105,7 +105,6 @@ struct netns_ipv4 {
+ #endif
+ int sysctl_tcp_mtu_probing;
+ int sysctl_tcp_base_mss;
+- int sysctl_tcp_min_snd_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -942,7 +942,7 @@ static struct ctl_table ipv4_net_table[] = {
+ },
+ {
+ .procname = "tcp_min_snd_mss",
+- .data = &init_net.ipv4.sysctl_tcp_min_snd_mss,
++ .data = &init_net.sysctl_tcp_min_snd_mss,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2423,7 +2423,7 @@ static int __net_init tcp_sk_init(struct net *net)
+ net->ipv4.sysctl_tcp_ecn_fallback = 1;
+
+ net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+- net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
++ net->sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
+ net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
+ net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1446,7 +1446,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ mss_now -= icsk->icsk_ext_hdr_len;
+
+ /* Then reserve room for full set of TCP options and 8 bytes of data */
+- mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
++ mss_now = max(mss_now, sock_net(sk)->sysctl_tcp_min_snd_mss);
+ return mss_now;
+ }
+
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -140,7 +140,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+ mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
+ mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
+ mss = max(mss, 68 - tp->tcp_header_len);
+- mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
++ mss = max(mss, net->sysctl_tcp_min_snd_mss);
+ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+ tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ }
diff --git a/series.conf b/series.conf
index 2569178..d3182ce 100644
--- a/series.conf
+++ b/series.conf
@@ -22620,6 +22620,13 @@
 patches.fixes/0001-mwifiex-Fix-possible-buffer-overflows-at-parsing-bss.patch
 patches.fixes/0001-mwifiex-Fix-heap-overflow-in-mwifiex_uap_parse_tail_.patch
 patches.fixes/0001-mwifiex-Abort-at-too-short-BSS-descriptor-element.patch
+ patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch
+ patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch
+ patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch
+ patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch
+ patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch
+ patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch
+ patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch
 
 ########################################################
 # end of sorted patches
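
Reviewer note: the following standalone user-space C sketch is not part of the
series; it only reproduces the arithmetic behind CVE-2019-11477 that the
tcp-limit-payload-size-of-sacked-skbs.patch above guards against. The
constants are copied from the patch; can_shift() is a hypothetical,
simplified stand-in for the kernel's tcp_skb_shift() checks, and the
17 * 32 KB figure comes from the commit message.

/*
 * Sketch (user space, not kernel code): with the minimal sender MSS of
 * 48 bytes and up to 40 bytes of TCP options, each segment carries only
 * 8 bytes of payload, so a fully coalesced skb (17 fragments of 32 KB
 * on x86) needs more segments than the 16-bit tcp_gso_segs field can
 * represent. can_shift() mirrors the guard added in tcp_skb_shift().
 */
#include <stdio.h>

#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS      48
#define TCP_MIN_GSO_SIZE     (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/* Refuse a coalesce that could not be represented in a 16-bit
 * segment count at the minimal GSO size (the two checks from the patch).
 */
static int can_shift(unsigned int to_len, unsigned int shiftlen,
                     unsigned int to_pcount, unsigned int pcount)
{
	if (to_len + shiftlen >= 65535u * TCP_MIN_GSO_SIZE)
		return 0;	/* more than 65535 segments of 8 bytes */
	if (to_pcount + pcount > 65535u)
		return 0;	/* segment count would overflow u16 */
	return 1;
}

int main(void)
{
	/* 17 fragments of 32 KB each, as in the commit message. */
	unsigned int payload = 17u * 32u * 1024u;	/* 557056 bytes */
	unsigned int segs = payload / TCP_MIN_GSO_SIZE;	/* 8-byte chunks */

	printf("payload: %u bytes -> %u segments of %d bytes\n",
	       payload, segs, TCP_MIN_GSO_SIZE);
	printf("overflows 16-bit tcp_gso_segs: %s\n",
	       segs > 65535u ? "yes" : "no");
	printf("guard allows shifting into such an skb: %s\n",
	       can_shift(payload, TCP_MIN_GSO_SIZE, segs, 1) ? "yes" : "no");
	return 0;
}

On a kernel carrying these patches the payload floor is also tunable:
tcp-add-tcp_min_snd_mss-sysctl.patch exposes it as net.ipv4.tcp_min_snd_mss
(default 48, bounded between TCP_MIN_SND_MSS and 65535), so an administrator
can raise it when 8 bytes of payload per segment is an unacceptable overhead.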