author    Hanjie Lin <hanjie.lin@amlogic.com>    2019-08-12 11:35:42 (GMT)
committer Jianxin Pan <jianxin.pan@amlogic.com>  2019-09-18 06:17:53 (GMT)
commit    8663100e9d5986889b7d5939832ba03d21e9667d (patch)
tree      679ec6f45d2e2e7aea437a04cae7f8c73764e4e4
parent    63f5c4559ecd693f230ef490254decb9ca5e514d (diff)
RAVENPLAT-2379: OSS vulnerability found in [boot.img]:[linux_kernel] (CVE-2018-5390) Risk:[] [1/1]
PD#OTT-5669

[Problem]
Linux kernel versions 4.9+ can be forced to make very expensive calls to
tcp_collapse_ofo_queue() and tcp_prune_ofo_queue() for every incoming
packet, which can lead to a denial of service.

[Solution]
Juha-Matti Tilli reported that malicious peers could inject tiny packets
into the out_of_order_queue, forcing very expensive calls to
tcp_collapse_ofo_queue() and tcp_prune_ofo_queue() for every incoming
packet. With the tcp_rmem[2] default of 6MB, the ooo queue could contain
~7000 nodes. This patch series cuts enough CPU cycles to render the
attack non-critical. We might in the future go further, like
disconnecting or black-holing proven malicious flows.

[Test]

Change-Id: I09c72cd11a38516f3b6e293deb21c5dd0faa3d9e
Signed-off-by: Hanjie Lin <hanjie.lin@amlogic.com>
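The key piece of bookkeeping sits in the new tcp_ooo_try_coalesce() helper
shown in the diff below: when two out-of-order skbs are merged, both
segment counts are folded into to->gso_segs and saturated at 0xFFFF, so a
later tcp_drop() still accounts for every coalesced segment. A minimal
user-space sketch of that saturating arithmetic (seg_merge is an
illustrative name, not a kernel function):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror the max_t()/min_t() arithmetic in tcp_ooo_try_coalesce():
     * every skb counts as at least one segment, and the merged total is
     * clamped to the 16-bit gso_segs field (0xFFFF).
     */
    static uint16_t seg_merge(uint16_t to_segs, uint16_t from_segs)
    {
            uint32_t sum = (to_segs ? to_segs : 1) +
                           (from_segs ? from_segs : 1);

            return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
    }

    int main(void)
    {
            printf("%u\n", seg_merge(0, 0));       /* 2: empty counts still cost a segment each */
            printf("%u\n", seg_merge(7000, 1));    /* 7001 */
            printf("%u\n", seg_merge(0xFFFF, 10)); /* 65535: saturated, no u16 wraparound */
            return 0;
    }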
Diffstat
-rw-r--r--  net/ipv4/tcp_input.c  28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f384e0c..c980f7b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4357,6 +4357,23 @@ static bool tcp_try_coalesce(struct sock *sk,
 	return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+				 struct sk_buff *to,
+				 struct sk_buff *from,
+				 bool *fragstolen)
+{
+	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+	/* In case tcp_drop() is called later, update to->gso_segs */
+	if (res) {
+		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+	}
+	return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
 	sk_drops_add(sk, skb);
@@ -4480,7 +4497,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	/* In the typical case, we are adding an skb to the end of the list.
 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
 	 */
-	if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
+	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
 coalesce_done:
 		tcp_grow_window(sk, skb);
 		kfree_skb_partial(skb, fragstolen);
@@ -4508,7 +4525,7 @@ coalesce_done:
 				/* All the bits are present. Drop. */
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb);
+				tcp_drop(sk, skb);
 				skb = NULL;
 				tcp_dsack_set(sk, seq, end_seq);
 				goto add_sack;
@@ -4527,10 +4544,10 @@ coalesce_done:
 						 TCP_SKB_CB(skb1)->end_seq);
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb1);
+				tcp_drop(sk, skb1);
 				goto merge_right;
 			}
-		} else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
+		} else if (tcp_ooo_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			goto coalesce_done;
 		}
 		p = &parent->rb_right;
@@ -5032,6 +5049,9 @@ static int tcp_prune_queue(struct sock *sk)
 	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+		return 0;
+
 	tcp_collapse_ofo_queue(sk);
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		tcp_collapse(sk, &sk->sk_receive_queue, NULL,
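
For reference, the last hunk's early return means tcp_prune_queue() now
skips the expensive tcp_collapse_ofo_queue()/tcp_collapse() passes once
receive memory is back under sk_rcvbuf. A minimal sketch of that control
flow (prune_queue and its parameters are stand-ins, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the patched tcp_prune_queue() decision: bail out cheaply
     * when allocated receive memory already fits the buffer, and only
     * fall through to the costly collapse work when over budget.
     */
    static bool prune_queue(long rmem_alloc, long rcvbuf)
    {
            if (rmem_alloc <= rcvbuf)
                    return false; /* early exit added by the patch */

            /* tcp_collapse_ofo_queue() / tcp_collapse() would run here */
            return true;
    }

    int main(void)
    {
            printf("8MB alloc vs 6MB buf -> collapse: %d\n",
                   prune_queue(8L << 20, 6L << 20));
            printf("1MB alloc vs 6MB buf -> early out: %d\n",
                   prune_queue(1L << 20, 6L << 20));
            return 0;
    }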