#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#define BPF_STRUCT_OPS(name, args...) \
-SEC("struct_ops/"#name) \
-BPF_PROG(name, args)
-
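/* The removed BPF_STRUCT_OPS(name, args...) wrapper pasted the program
 * name into a per-op section, e.g. SEC("struct_ops/bpf_cubic_init").
 * That suffix only named the program: which op a program implements is
 * fixed by the SEC(".struct_ops") map initializer (see the sketch after
 * the last op below), so the plain SEC("struct_ops") sections used from
 * here on carry the same information.
 */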
#define USEC_PER_SEC 1000000UL
#define TCP_PACING_SS_RATIO (200)
#define TCP_PACING_CA_RATIO (120)
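/* Used as percentages when deriving the pacing rate from cwnd, mss and
 * srtt: pace at 200% of the current rate while in slow start (cwnd
 * doubles every RTT, so pace ahead of it) and at 120% once in
 * congestion avoidance.
 */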
return flag & FLAG_DATA_ACKED;
}
-void BPF_STRUCT_OPS(bpf_cubic_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
cubictcp_init(sk);
}
-void BPF_STRUCT_OPS(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
cubictcp_cwnd_event(sk, event);
}
-void BPF_STRUCT_OPS(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
- const struct rate_sample *rs)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
+ const struct rate_sample *rs)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_update_pacing_rate(sk);
}
-__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
return cubictcp_recalc_ssthresh(sk);
}
-void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
cubictcp_state(sk, new_state);
}
-void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
- const struct ack_sample *sample)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
{
cubictcp_acked(sk, sample);
}
-__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
{
return tcp_reno_undo_cwnd(sk);
}
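/* A sketch (map name illustrative) of the struct_ops map that registers
 * the progs above as one congestion control. With the generic
 * SEC("struct_ops") section names, this initializer alone ties each
 * program to its tcp_congestion_ops member.
 */
SEC(".struct_ops")
struct tcp_congestion_ops cc_cubic = {
	.init		= (void *)bpf_cubic_init,
	.ssthresh	= (void *)bpf_cubic_recalc_ssthresh,
	.cong_control	= (void *)bpf_cubic_cong_control,
	.undo_cwnd	= (void *)bpf_cubic_undo_cwnd,
	.set_state	= (void *)bpf_cubic_state,
	.cwnd_event	= (void *)bpf_cubic_cwnd_event,
	.pkts_acked	= (void *)bpf_cubic_acked,
	.name		= "bpf_cc_cubic",
};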
__u32 curr_rtt; /* the minimum rtt of current round */
};
-static inline void bictcp_reset(struct bictcp *ca)
+static void bictcp_reset(struct bictcp *ca)
{
ca->cnt = 0;
ca->last_max_cwnd = 0;
#define USEC_PER_SEC 1000000UL
#define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
-static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
+static __u64 div64_u64(__u64 dividend, __u64 divisor)
{
return dividend / divisor;
}
#define div64_ul div64_u64
#define BITS_PER_U64 (sizeof(__u64) * 8)
-static __always_inline int fls64(__u64 x)
+static int fls64(__u64 x)
{
int num = BITS_PER_U64 - 1;
return num + 1;
}
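/* The elided middle of fls64() narrows the search window by halving the
 * shift each step. A self-contained equivalent (sketch, written as a
 * loop instead of the kernel's unrolled steps): fls64(x) returns one
 * plus the index of the most significant set bit, or 0 if x == 0.
 */
static int fls64_sketch(__u64 x)
{
	int num = BITS_PER_U64 - 1;
	int shift;

	if (!x)
		return 0;
	for (shift = BITS_PER_U64 / 2; shift; shift >>= 1) {
		/* If the top 'shift' bits are clear, skip past them. */
		if (!(x & (~0ULL << (BITS_PER_U64 - shift)))) {
			num -= shift;
			x <<= shift;
		}
	}
	return num + 1;	/* fls64_sketch(1) == 1, fls64_sketch(1ULL << 63) == 64 */
}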
-static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
+static __u32 bictcp_clock_us(const struct sock *sk)
{
return tcp_sk(sk)->tcp_mstamp;
}
-static __always_inline void bictcp_hystart_reset(struct sock *sk)
+static void bictcp_hystart_reset(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
ca->sample_cnt = 0;
}
-/* "struct_ops/" prefix is a requirement */
-SEC("struct_ops/bpf_cubic_init")
+SEC("struct_ops")
void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
-/* "struct_ops" prefix is a requirement */
-SEC("struct_ops/bpf_cubic_cwnd_event")
+SEC("struct_ops")
void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 * Avg err ~= 0.195%
 */
-static __always_inline __u32 cubic_root(__u64 a)
+static __u32 cubic_root(__u64 a)
{
__u32 x, b, shift;
/*
* Compute congestion window to use.
*/
-static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
- __u32 acked)
+static void bictcp_update(struct bictcp *ca, __u32 cwnd, __u32 acked)
{
__u32 delta, bic_target, max_cnt;
__u64 offs, t;
ca->cnt = max(ca->cnt, 2U);
}
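/* bictcp_update() above tracks the CUBIC window curve (RFC 9438):
 *
 *	W(t) = C * (t - K)^3 + W_max
 *	K    = cbrt(W_max * (1 - beta_cubic) / C)
 *
 * where W_max is cwnd at the last loss event and beta_cubic the
 * multiplicative decrease (beta / BICTCP_BETA_SCALE here). bic_target
 * is W(t) in segments, and ca->cnt is chosen so that the
 * tcp_cong_avoid_ai() call below grows cwnd toward it at roughly one
 * segment per cnt ACKed segments.
 */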
-/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
-void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
-__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
-void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
/* Account for TSO/GRO delays.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
-static __always_inline __u32 hystart_ack_delay(struct sock *sk)
+static __u32 hystart_ack_delay(struct sock *sk)
{
unsigned long rate;
div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
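/* Worked example (sketch): with GSO_MAX_SIZE = 65536 and a pacing rate
 * of 1 GB/s the cushion is 65536 * 4 * 1000000 / 1e9 ~= 262 usec; at
 * 125 MB/s (1 Gbit/s) the raw value would be ~2097 usec, so the 1 ms
 * cap applies.
 */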
-static __always_inline void hystart_update(struct sock *sk, __u32 delay)
+static void hystart_update(struct sock *sk, __u32 delay)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
int bpf_cubic_acked_called = 0;
-void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
- const struct ack_sample *sample)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
-__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
{
return tcp_reno_undo_cwnd(sk);
}
static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
-static __always_inline void dctcp_reset(const struct tcp_sock *tp,
- struct dctcp *ca)
+static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
{
ca->next_seq = tp->snd_nxt;
ca->old_delivered_ce = tp->delivered_ce;
}
-SEC("struct_ops/dctcp_init")
+SEC("struct_ops")
void BPF_PROG(dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
dctcp_reset(tp, ca);
}
-SEC("struct_ops/dctcp_ssthresh")
+SEC("struct_ops")
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
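/* The shift folds DCTCP's cwnd <- cwnd * (1 - alpha/2) into fixed
 * point: alpha is kept scaled to [0, DCTCP_MAX_ALPHA = 1024], so
 * (cwnd * alpha) >> 11 == cwnd * (alpha / 1024) / 2. E.g. cwnd = 100
 * with alpha = 1024 (every ACK marked) gives 100 - 50 = 50, the classic
 * halving; alpha = 256 gives 100 - 12 = 88.
 */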
-SEC("struct_ops/dctcp_update_alpha")
+SEC("struct_ops")
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
}
}
-static __always_inline void dctcp_react_to_loss(struct sock *sk)
+static void dctcp_react_to_loss(struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}
-SEC("struct_ops/dctcp_state")
+SEC("struct_ops")
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
*/
}
-static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
+static void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
-static __always_inline
-void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
- __u32 *prior_rcv_nxt, __u32 *ce_state)
+static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
+ __u32 *prior_rcv_nxt, __u32 *ce_state)
{
__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
dctcp_ece_ack_cwr(sk, new_ce_state);
}
-SEC("struct_ops/dctcp_cwnd_event")
+SEC("struct_ops")
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
struct dctcp *ca = inet_csk_ca(sk);
}
}
-SEC("struct_ops/dctcp_cwnd_undo")
+SEC("struct_ops")
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
{
const struct dctcp *ca = inet_csk_ca(sk);
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
-SEC("struct_ops/dctcp_reno_cong_avoid")
+SEC("struct_ops")
void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
tcp_reno_cong_avoid(sk, ack, acked);
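/* Userspace side (sketch; object path and map name are assumptions,
 * error handling elided; needs <bpf/libbpf.h>): registering the CC via
 * its struct_ops map makes it selectable by name with
 * setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_dctcp", 9).
 */
static struct bpf_link *attach_bpf_dctcp(void)
{
	struct bpf_object *obj = bpf_object__open_file("bpf_dctcp.bpf.o", NULL);

	bpf_object__load(obj);
	return bpf_map__attach_struct_ops(
			bpf_object__find_map_by_name(obj, "dctcp"));
}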
char _license[] SEC("license") = "GPL";
const char cubic[] = "cubic";
-void BPF_STRUCT_OPS(dctcp_nouse_release, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(dctcp_nouse_release, struct sock *sk)
{
bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
(void *)cubic, sizeof(cubic));
char _license[] SEC("license") = "X";
-void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(nogpltcp_init, struct sock *sk)
{
}
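/* The "X" license above is the point of this prog: struct_ops programs
 * for tcp_congestion_ops must carry a GPL-compatible license, so
 * loading this object is expected to fail at verification time.
 */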
char _license[] SEC("license") = "GPL";
-SEC("struct_ops/incompl_cong_ops_ssthresh")
+SEC("struct_ops")
__u32 BPF_PROG(incompl_cong_ops_ssthresh, struct sock *sk)
{
return tcp_sk(sk)->snd_ssthresh;
}
-SEC("struct_ops/incompl_cong_ops_undo_cwnd")
+SEC("struct_ops")
__u32 BPF_PROG(incompl_cong_ops_undo_cwnd, struct sock *sk)
{
return tcp_sk(sk)->snd_cwnd;
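/* Only ssthresh and undo_cwnd are implemented in this file; the
 * matching struct_ops map (elided) deliberately leaves the mandatory
 * cong_avoid/cong_control slot empty, so registration is expected to be
 * rejected by the kernel.
 */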
extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
-SEC("struct_ops/init")
+SEC("struct_ops")
void BPF_PROG(init, struct sock *sk)
{
bbr_init(sk);
cubictcp_init(sk);
}
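/* Every callee in this file is a congestion-control kfunc; the externs
 * (mostly elided here) all follow the pattern of the two shown above,
 * e.g. for the bbr calls (sketch inferred from the call sites):
 *
 *	extern void bbr_init(struct sock *sk) __ksym;
 *	extern void bbr_main(struct sock *sk, __u32 ack, int flag,
 *			     const struct rate_sample *rs) __ksym;
 */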
-SEC("struct_ops/in_ack_event")
+SEC("struct_ops")
void BPF_PROG(in_ack_event, struct sock *sk, u32 flags)
{
dctcp_update_alpha(sk, flags);
}
-SEC("struct_ops/cong_control")
+SEC("struct_ops")
void BPF_PROG(cong_control, struct sock *sk, u32 ack, int flag, const struct rate_sample *rs)
{
bbr_main(sk, ack, flag, rs);
}
-SEC("struct_ops/cong_avoid")
+SEC("struct_ops")
void BPF_PROG(cong_avoid, struct sock *sk, u32 ack, u32 acked)
{
cubictcp_cong_avoid(sk, ack, acked);
}
-SEC("struct_ops/sndbuf_expand")
+SEC("struct_ops")
u32 BPF_PROG(sndbuf_expand, struct sock *sk)
{
return bbr_sndbuf_expand(sk);
}
-SEC("struct_ops/undo_cwnd")
+SEC("struct_ops")
u32 BPF_PROG(undo_cwnd, struct sock *sk)
{
bbr_undo_cwnd(sk);
return dctcp_cwnd_undo(sk);
}
-SEC("struct_ops/cwnd_event")
+SEC("struct_ops")
void BPF_PROG(cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
bbr_cwnd_event(sk, event);
cubictcp_cwnd_event(sk, event);
}
-SEC("struct_ops/ssthresh")
+SEC("struct_ops")
u32 BPF_PROG(ssthresh, struct sock *sk)
{
bbr_ssthresh(sk);
return cubictcp_recalc_ssthresh(sk);
}
-SEC("struct_ops/min_tso_segs")
+SEC("struct_ops")
u32 BPF_PROG(min_tso_segs, struct sock *sk)
{
return bbr_min_tso_segs(sk);
}
-SEC("struct_ops/set_state")
+SEC("struct_ops")
void BPF_PROG(set_state, struct sock *sk, u8 new_state)
{
bbr_set_state(sk, new_state);
cubictcp_state(sk, new_state);
}
-SEC("struct_ops/pkts_acked")
+SEC("struct_ops")
void BPF_PROG(pkts_acked, struct sock *sk, const struct ack_sample *sample)
{
cubictcp_acked(sk, sample);
char _license[] SEC("license") = "GPL";
-SEC("struct_ops/unsupp_cong_op_get_info")
+SEC("struct_ops")
size_t BPF_PROG(unsupp_cong_op_get_info, struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info)
{
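	/* The body is irrelevant (sketch: just return 0): get_info is not
	 * in the set of tcp_congestion_ops members offered to BPF, so a
	 * map containing this prog is expected to be rejected at
	 * registration.
	 */
	return 0;
}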
int ca1_cnt = 0;
int ca2_cnt = 0;
-SEC("struct_ops/ca_update_1_init")
+SEC("struct_ops")
void BPF_PROG(ca_update_1_init, struct sock *sk)
{
ca1_cnt++;
}
-SEC("struct_ops/ca_update_2_init")
+SEC("struct_ops")
void BPF_PROG(ca_update_2_init, struct sock *sk)
{
ca2_cnt++;
}
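/* Userspace sketch of what these counters verify (skeleton type and
 * field names are assumptions): a struct_ops link can be redirected to
 * another CA map in place, without detaching.
 */
static void switch_ca(struct tcp_ca_update *skel)
{
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	/* connections opened now run ca_update_1_init and bump ca1_cnt */
	bpf_link__update_map(link, skel->maps.ca_update_2);
	/* later connections run ca_update_2_init and bump ca2_cnt */
}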
-SEC("struct_ops/ca_update_cong_control")
+SEC("struct_ops")
void BPF_PROG(ca_update_cong_control, struct sock *sk,
const struct rate_sample *rs)
{
}
-SEC("struct_ops/ca_update_ssthresh")
+SEC("struct_ops")
__u32 BPF_PROG(ca_update_ssthresh, struct sock *sk)
{
return tcp_sk(sk)->snd_ssthresh;
}
-SEC("struct_ops/ca_update_undo_cwnd")
+SEC("struct_ops")
__u32 BPF_PROG(ca_update_undo_cwnd, struct sock *sk)
{
return tcp_sk(sk)->snd_cwnd;
#define min(a, b) ((a) < (b) ? (a) : (b))
-static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
+static unsigned int tcp_left_out(const struct tcp_sock *tp)
{
return tp->sacked_out + tp->lost_out;
}
-static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+static unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
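/* E.g. packets_out = 10, sacked_out = 2, lost_out = 1, retrans_out = 1:
 * left_out = 3, so 10 - 3 + 1 = 8 segments are still in the network.
 */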
-SEC("struct_ops/write_sk_pacing_init")
+SEC("struct_ops")
void BPF_PROG(write_sk_pacing_init, struct sock *sk)
{
#ifdef ENABLE_ATOMICS_TESTS
#endif
}
-SEC("struct_ops/write_sk_pacing_cong_control")
+SEC("struct_ops")
void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
const struct rate_sample *rs)
{
tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
}
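/* The elided lines above derive the rate written to the socket; a
 * sketch of that arithmetic (srtt_us holds the smoothed RTT left-shifted
 * by 3, hence the matching shift on the numerator):
 *
 *	rate = (tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC << 3) /
 *	       (tp->srtt_us ?: 1U << 3);
 *	sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
 *
 * The app_limited store then exercises write access to a tcp_sock field
 * from a struct_ops program, like the sk_pacing writes above it.
 */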
-SEC("struct_ops/write_sk_pacing_ssthresh")
+SEC("struct_ops")
__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
{
return tcp_sk(sk)->snd_ssthresh;
}
-SEC("struct_ops/write_sk_pacing_undo_cwnd")
+SEC("struct_ops")
__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
{
return tcp_sk(sk)->snd_cwnd;