2 # SPDX-License-Identifier: GPL-2.0
4 . "$(dirname "${0}")/mptcp_lib.sh"
# Unique per-run token for artifact file names: $sec in hex (presumably
# seconds-since-epoch, set elsewhere — TODO confirm) plus a random
# mktemp suffix, so concurrent/repeated runs do not collide.
7 rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
# Watchdog budget for each transfer: twice the poll timeout plus one,
# so `timeout` only fires after mptcp_connect has had a chance to give up.
14 timeout_test=$((timeout_poll * 2 + 1))
# Help text for the supported command-line flags (see getopts loop below).
# Fix: "runs al testcases" -> "runs all testcases" (typo in user-visible text).
21 echo "Usage: $0 [ -b ] [ -c ] [ -d ]"
22 echo -e "\t-b: bail out after first error, otherwise runs all testcases"
23 echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)"
24 echo -e "\t-d: debug this script"
# Cleanup: remove the payload files created during setup...
30 rm -f "$large" "$small"
# ...and tear down every per-test network namespace.
34 for netns in "$ns1" "$ns2" "$ns3";do
# Prerequisite probe: without the iproute2 'ip' tool the test cannot
# run at all, so it is skipped (not failed).
41 ip -Version > /dev/null 2>&1
43 echo "SKIP: Could not run test without ip tool"
48 # ns1eth1 ns2eth1 ns2eth3 ns3eth1
# Total bytes transferred per test: 2 * 2048 * 4096 = 16 MiB.
60 size=$((2 * 2048 * 4096))
# Build the two zero-filled payload files: $small is 20 * 4 KiB = 80 KiB,
# $large is $size bytes (count derived from the block size above).
62 dd if=/dev/zero of=$small bs=4096 count=20 >/dev/null 2>&1
63 dd if=/dev/zero of=$large bs=4096 count=$((size / 4096)) >/dev/null 2>&1
# Create the three namespaces; a failure (e.g. missing privileges) skips
# the test. rp_filter is disabled so asymmetric multipath routing does
# not cause reverse-path drops.
67 for i in "$ns1" "$ns2" "$ns3";do
68 ip netns add $i || exit $ksft_skip
69 ip -net $i link set lo up
70 ip netns exec $i sysctl -q net.ipv4.conf.all.rp_filter=0
71 ip netns exec $i sysctl -q net.ipv4.conf.default.rp_filter=0
# Topology: two parallel veth links ns1<->ns2 plus one link ns2<->ns3,
# with ns2 acting as the router in the middle (matches the diagram above).
74 ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
75 ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth2 netns "$ns2"
76 ip link add ns2eth3 netns "$ns2" type veth peer name ns3eth1 netns "$ns3"
# ns1 (client), first path via ns1eth1: default routes with the default
# (best) metric.
78 ip -net "$ns1" addr add 10.0.1.1/24 dev ns1eth1
79 ip -net "$ns1" addr add dead:beef:1::1/64 dev ns1eth1 nodad
80 ip -net "$ns1" link set ns1eth1 up mtu 1500
81 ip -net "$ns1" route add default via 10.0.1.2
82 ip -net "$ns1" route add default via dead:beef:1::2
# Second path via ns1eth2: default routes get a worse metric (101) so
# plain routing prefers the first link.
84 ip -net "$ns1" addr add 10.0.2.1/24 dev ns1eth2
85 ip -net "$ns1" addr add dead:beef:2::1/64 dev ns1eth2 nodad
86 ip -net "$ns1" link set ns1eth2 up mtu 1500
87 ip -net "$ns1" route add default via 10.0.2.2 metric 101
88 ip -net "$ns1" route add default via dead:beef:2::2 metric 101
# MPTCP path manager on the client: limits 1 1, and register 10.0.2.1 as
# a subflow endpoint so the second link is used by an additional subflow.
90 ip netns exec "$ns1" ./pm_nl_ctl limits 1 1
91 ip netns exec "$ns1" ./pm_nl_ctl add 10.0.2.1 dev ns1eth2 flags subflow
# ns2 (router): one address per link, IPv4 and IPv6, all at MTU 1500.
93 ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
94 ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
95 ip -net "$ns2" link set ns2eth1 up mtu 1500
97 ip -net "$ns2" addr add 10.0.2.2/24 dev ns2eth2
98 ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth2 nodad
99 ip -net "$ns2" link set ns2eth2 up mtu 1500
101 ip -net "$ns2" addr add 10.0.3.2/24 dev ns2eth3
102 ip -net "$ns2" addr add dead:beef:3::2/64 dev ns2eth3 nodad
103 ip -net "$ns2" link set ns2eth3 up mtu 1500
# Enable forwarding for both families so ns1<->ns3 traffic can transit.
104 ip netns exec "$ns2" sysctl -q net.ipv4.ip_forward=1
105 ip netns exec "$ns2" sysctl -q net.ipv6.conf.all.forwarding=1
# ns3 (server): single link towards the router, default routes back
# through ns2.
107 ip -net "$ns3" addr add 10.0.3.3/24 dev ns3eth1
108 ip -net "$ns3" addr add dead:beef:3::3/64 dev ns3eth1 nodad
109 ip -net "$ns3" link set ns3eth1 up mtu 1500
110 ip -net "$ns3" route add default via 10.0.3.2
111 ip -net "$ns3" route add default via dead:beef:3::2
# Server-side path manager: accept 1 additional subflow / 1 add_addr.
113 ip netns exec "$ns3" ./pm_nl_ctl limits 1 1
115 # debug build can slow down measurably the test program
116 # we use quite tight time limit on the run-time, to ensure
118 # Use kmemleak/lockdep/kasan/prove_locking presence as a rough
119 # estimate for this being a debug kernel and increase the
120 # maximum run-time accordingly. Observed run times for CI builds
121 # running selftests, including kbuild, were used to determine the
122 # amount of time to add.
# If any of those debug-only init symbols appears in kallsyms, grant an
# extra 550 ms of run-time slack (added to the time budget in run_test).
123 grep -q ' kmemleak_init$\| lockdep_init$\| kasan_init$\| prove_locking$' /proc/kallsyms && slack=$((slack+550))
# Pick a unique listen port per test and bump the global test counter.
132 port=$((10000+$test_cnt))
133 test_cnt=$((test_cnt+1))
# When invoked via sudo, make tcpdump drop privileges (-Z) to the
# original user so the pcap files end up owned by that user.
# NOTE(review): $SUDO_USER is unquoted in the [ -z ] test below — it
# would misparse if the value ever contained whitespace; confirm & quote.
141 if [ -z $SUDO_USER ] ; then
144 capuser="-Z $SUDO_USER"
# Capture on all interfaces, full snaplen, 32 MiB kernel buffer; one
# pcap per endpoint, named after the run token and port.
147 local capfile="${rndh}-${port}"
148 local capopt="-i any -s 65535 -B 32768 ${capuser}"
150 ip netns exec ${ns3} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
151 local cappid_listener=$!
153 ip netns exec ${ns1} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
154 local cappid_connector=$!
# Launch the server in ns3 under the watchdog timeout: it listens (-l)
# on $port, sends the contents of $sin and stores received data in $sout.
159 timeout ${timeout_test} \
160 ip netns exec ${ns3} \
161 ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $max_time \
162 0.0.0.0 < "$sin" > "$sout" &
# Do not start the client until the listener socket is actually open.
165 mptcp_lib_wait_local_port_listen "${ns3}" "${port}"
# Client in ns1 connects to the server address, sending $cin and
# storing received data in $cout.
167 timeout ${timeout_test} \
168 ip netns exec ${ns1} \
169 ./mptcp_connect -jt ${timeout_poll} -p $port -T $max_time \
170 10.0.3.3 < "$cin" > "$cout" &
# Stop both packet captures once the transfer has completed.
180 kill ${cappid_listener}
181 kill ${cappid_connector}
# Verify payload integrity in both directions (server->client and
# client->server).
184 cmp $sin $cout > /dev/null 2>&1
186 cmp $cin $sout > /dev/null 2>&1
189 printf "%-16s" " max $max_time "
# Success requires clean exit codes on both sides and bit-identical
# payloads in both directions.
190 if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \
191 [ $cmpc -eq 0 ] && [ $cmps -eq 0 ]; then
# On failure, dump diagnostics to stderr: exit codes plus the per-port
# socket state (ss) as seen from each endpoint namespace.
198 echo "client exit code $retc, server $rets" 1>&2
199 echo -e "\nnetns ${ns3} socket stat for $port:" 1>&2
200 ip netns exec ${ns3} ss -nita 1>&2 -o "sport = :$port"
201 echo -e "\nnetns ${ns1} socket stat for $port:" 1>&2
202 ip netns exec ${ns1} ss -nita 1>&2 -o "dport = :$port"
# Turn the numeric delays into netem argument strings ("" when zero).
221 [ $delay1 -gt 0 ] && delay1="delay $delay1" || delay1=""
222 [ $delay2 -gt 0 ] && delay2="delay $delay2" || delay2=""
# Drop any qdisc left behind by a previous testcase (errors ignored:
# there may be none)...
224 for dev in ns1eth1 ns1eth2; do
225 tc -n $ns1 qdisc del dev $dev root >/dev/null 2>&1
227 for dev in ns2eth1 ns2eth2; do
228 tc -n $ns2 qdisc del dev $dev root >/dev/null 2>&1
# ...then shape both ends of each ns1<->ns2 link with the requested
# netem rate and (optional) delay.
230 tc -n $ns1 qdisc add dev ns1eth1 root netem rate ${rate1}mbit $delay1
231 tc -n $ns1 qdisc add dev ns1eth2 root netem rate ${rate2}mbit $delay2
232 tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1
233 tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2
235 # time is measured in ms, account for transfer size, aggregated link speed
236 # and header overhead (10%)
237 # ms byte -> bit 10% mbit -> kbit -> bit 10%
238 local time=$((1000 * size * 8 * 10 / ((rate1 + rate2) * 1000 * 1000 * 9) ))
240 # mptcp_connect will do some sleeps to allow the mp_join handshake
241 # completion (see mptcp_connect): 200ms on each side, add some slack
# $slack also includes the debug-kernel allowance computed at startup.
242 time=$((time + 400 + slack))
# Forward direction: send $small one way and $large the other, bounded
# by the computed time budget; record the result for the TAP summary.
244 printf "%-60s" "$msg"
245 do_transfer $small $large $time
247 mptcp_lib_result_code "${lret}" "${msg}"
248 if [ $lret -ne 0 ]; then
# With -b (bail), abort the whole script on the first failing testcase.
250 [ $bail -eq 0 ] || exit $ret
# Same testcase with the payloads swapped, to exercise the reverse
# transfer direction.
253 msg+=" - reverse direction"
254 printf "%-60s" "${msg}"
255 do_transfer $large $small $time
257 mptcp_lib_result_code "${lret}" "${msg}"
258 if [ $lret -ne 0 ]; then
260 [ $bail -eq 0 ] || exit $ret
# Command-line parsing: -b bail on first error, -c capture, -d debug,
# -h help (see usage text above).
264 while getopts "bcdh" option;do
# Test matrix: balanced links first, then the asymmetric
# bandwidth/delay combinations.
287 run_test 10 10 0 0 "balanced bwidth"
288 run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
290 # we still need some additional infrastructure to pass the following test-cases
291 run_test 10 3 0 0 "unbalanced bwidth"
292 run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
293 run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
# Emit the aggregated TAP-format results collected by mptcp_lib.
295 mptcp_lib_result_print_all_tap