/*
 * TCP Illinois congestion control.
 * http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>

#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX

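/*
 * Note (added): alpha and beta are kept in fixed point, scaled by
 * ALPHA_SCALE and BETA_SCALE.  Assuming the shift values above,
 * ALPHA_BASE = 128 represents an additive increase of 1.0 segment per
 * RTT (Reno-like), ALPHA_MAX = 1280 represents 10.0, and
 * BETA_MAX = 32 out of BETA_SCALE = 64 represents a multiplicative
 * decrease factor of 0.5.
 */
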
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");

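/*
 * Usage note (added): both parameters are registered with permission 0,
 * so they do not appear under /sys/module/tcp_illinois/parameters/ and
 * can only be set at load time, e.g.
 *
 *	modprobe tcp_illinois win_thresh=20 theta=10
 */
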
/* TCP Illinois Parameters */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtt measurements below threshold */
};

static void rtt_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->end_seq = tp->snd_nxt;
	ca->cnt_rtt = 0;
	ca->sum_rtt = 0;

	/* TODO: age max_rtt? */
}

static void tcp_illinois_init(struct sock *sk)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->alpha = ALPHA_MAX;
	ca->beta = BETA_BASE;
	ca->base_rtt = 0x7fffffff;
	ca->max_rtt = 0;
	ca->acked = 0;
	ca->rtt_low = 0;
	ca->rtt_above = 0;

	rtt_reset(sk);
}

/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->acked = pkts_acked;

	/* dup ack, no rtt sample */
	if (rtt < 0)
		return;

	/* ignore bogus values, this prevents wraparound in alpha math */
	if (rtt > RTT_MAX)
		rtt = RTT_MAX;

	/* keep track of minimum RTT seen so far */
	if (ca->base_rtt > rtt)
		ca->base_rtt = rtt;

	/* and max */
	if (ca->max_rtt < rtt)
		ca->max_rtt = rtt;

	++ca->cnt_rtt;
	ca->sum_rtt += rtt;
}

/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}

/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}

/*
 * Compute value of alpha used for additive increase.
 * If small window then use 1.0, equivalent to Reno.
 *
 * For larger windows, adjust based on average delay.
 * A. If average delay is at minimum (we are uncongested),
 *    then use large alpha (10.0) to increase faster.
 * B. If average delay is at maximum (getting congested),
 *    then use small alpha (0.3).
 *
 * The result is a convex window growth curve.
 */
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
	u32 d1 = dm / 100;	/* Low threshold */

	if (da <= d1) {
		/* If never got out of low delay zone, then use max */
		if (!ca->rtt_above)
			return ALPHA_MAX;

		/* Wait for 5 good RTT's before allowing alpha to go alpha max.
		 * This prevents one good RTT from causing sudden window increase.
		 */
		if (++ca->rtt_low < theta)
			return ca->alpha;

		ca->rtt_low = 0;
		ca->rtt_above = 0;
		return ALPHA_MAX;
	}

	ca->rtt_above = 1;

	/*
	 * Based on:
	 *
	 *      (dm - d1) amin amax
	 * k1 = -------------------
	 *         amax - amin
	 *
	 *       (dm - d1) amin
	 * k2 = ----------------  - d1
	 *        amax - amin
	 *
	 *             k1
	 * alpha = ----------
	 *          k2 + da
	 */
	dm -= d1;
	da -= d1;
	return (dm * ALPHA_MAX) /
		(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}

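/*
 * Worked example (added, assuming ALPHA_SHIFT = 7 as above, so
 * ALPHA_MAX = 1280 and ALPHA_MIN = 38 in fixed point):
 *
 *	after the d1 offset is removed, let dm = 1000 usec;
 *	da = 0    -> 1000*1280 / 1000                  = 1280  (10.0)
 *	da = 500  -> 1000*1280 / (1000 + 500*1242/38)  ~=  73  (~0.57)
 *	da = 1000 -> 1000*1280 / (1000 + 1000*1242/38) ~=  38  (~0.3)
 *
 * i.e. alpha falls off convexly: at half the maximum queueing delay it
 * is already close to the Reno-like region rather than midway to
 * ALPHA_MAX.
 */
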
/*
 * Beta used for multiplicative decrease.
 * For small window sizes returns same value as Reno (0.5)
 *
 * If delay is small (10% of max) then beta = 1/8
 * If delay is up to 80% of max then beta = 1/2
 * In between is a linear function
 */
static u32 beta(u32 da, u32 dm)
{
	u32 d2, d3;

	d2 = dm / 10;
	if (da <= d2)
		return BETA_MIN;

	d3 = (8 * dm) / 10;
	if (da >= d3 || d3 <= d2)
		return BETA_MAX;

	/*
	 * Based on:
	 *
	 *       bmin d3 - bmax d2
	 * k3 = -------------------
	 *           d3 - d2
	 *
	 *       bmax - bmin
	 * k4 = -------------
	 *         d3 - d2
	 *
	 * b = k3 + k4 da
	 */
	return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
		/ (d3 - d2);
}

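/*
 * Worked example (added, assuming BETA_SHIFT = 6 as above, so
 * BETA_MIN = 8 and BETA_MAX = 32 in fixed point):
 *
 *	dm = 100 usec -> d2 = 10, d3 = 80;
 *	da = 45 (midway between d2 and d3):
 *	beta = (8*80 - 32*10 + (32-8)*45) / (80 - 10)
 *	     = 1400 / 70 = 20	-> 20/64 = 0.3125
 *
 * i.e. beta interpolates linearly between 0.125 and 0.5 as the average
 * queueing delay moves from 10% to 80% of the maximum.
 */
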
/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (tp->snd_cwnd < win_thresh) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
	} else if (ca->cnt_rtt > 0) {
		u32 dm = max_delay(ca);
		u32 da = avg_delay(ca);

		ca->alpha = alpha(ca, da, dm);
		ca->beta = beta(da, dm);
	}

	rtt_reset(sk);
}

/*
 * In case of loss, reset to default values
 */
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
	struct illinois *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
		ca->rtt_low = 0;
		ca->rtt_above = 0;
		rtt_reset(sk);
	}
}

/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC2861 only increase cwnd if fully utilized */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In slow start */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);

	else {	/* Congestion avoidance */
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;

		/* This is close approximation of:
		 * tp->snd_cwnd += alpha/tp->snd_cwnd
		 */
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32)tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}

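/*
 * Worked example (added, assuming ALPHA_SHIFT = 7 as above):
 *
 *	alpha = ALPHA_BASE (128, i.e. 1.0):
 *		delta = snd_cwnd_cnt, so cwnd grows by one segment only
 *		after a full window of packets has been acked (Reno).
 *	alpha = ALPHA_MAX (1280, i.e. 10.0):
 *		delta reaches snd_cwnd after roughly snd_cwnd/10 acked
 *		packets, so cwnd grows by about 10 segments per RTT.
 */
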
static u32 tcp_illinois_ssthresh(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	/* Multiplicative decrease */
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
}

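/*
 * Worked example (added, assuming BETA_SHIFT = 6 as above): with
 * snd_cwnd = 100, beta = BETA_MAX (32) gives ssthresh = 100 - 50 = 50
 * (Reno-style halving), while beta = BETA_MIN (8) gives
 * ssthresh = 100 - 12 = 88, a much gentler backoff when the measured
 * queueing delay was low.
 */
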
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_illinois_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct illinois *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rttcnt = ca->cnt_rtt,
			.tcpv_minrtt = ca->base_rtt,
		};

		if (info.tcpv_rttcnt > 0) {
			u64 t = ca->sum_rtt;

			do_div(t, info.tcpv_rttcnt);
			info.tcpv_rtt = t;
		}
		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}

static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};

static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}

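/*
 * Note (added): the BUILD_BUG_ON above checks that the per-connection
 * state fits in the private congestion-control area of
 * struct inet_connection_sock.  struct illinois is roughly 40 bytes
 * once padded for alignment, comfortably under ICSK_CA_PRIV_SIZE
 * (at least 64 bytes, depending on kernel version).
 */
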
static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}

module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");