xref: /linux-6.15/tools/include/uapi/linux/bpf.h (revision 83a37b32)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  */
7 #ifndef _UAPI__LINUX_BPF_H__
8 #define _UAPI__LINUX_BPF_H__
9 
10 #include <linux/types.h>
11 #include <linux/bpf_common.h>
12 
13 /* Extended instruction set based on top of classic BPF */
14 
15 /* instruction classes */
16 #define BPF_ALU64	0x07	/* alu mode in double word width */
17 
18 /* ld/ldx fields */
19 #define BPF_DW		0x18	/* double word */
20 #define BPF_XADD	0xc0	/* exclusive add */
21 
22 /* alu/jmp fields */
23 #define BPF_MOV		0xb0	/* mov reg to reg */
24 #define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */
25 
26 /* change endianness of a register */
27 #define BPF_END		0xd0	/* flags for endianness conversion: */
28 #define BPF_TO_LE	0x00	/* convert to little-endian */
29 #define BPF_TO_BE	0x08	/* convert to big-endian */
30 #define BPF_FROM_LE	BPF_TO_LE
31 #define BPF_FROM_BE	BPF_TO_BE
32 
33 /* jmp encodings */
34 #define BPF_JNE		0x50	/* jump != */
35 #define BPF_JLT		0xa0	/* LT is unsigned, '<' */
36 #define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
37 #define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
38 #define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
39 #define BPF_JSLT	0xc0	/* SLT is signed, '<' */
40 #define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
41 #define BPF_CALL	0x80	/* function call */
42 #define BPF_EXIT	0x90	/* function return */
43 
44 /* Register numbers */
45 enum {
46 	BPF_REG_0 = 0,
47 	BPF_REG_1,
48 	BPF_REG_2,
49 	BPF_REG_3,
50 	BPF_REG_4,
51 	BPF_REG_5,
52 	BPF_REG_6,
53 	BPF_REG_7,
54 	BPF_REG_8,
55 	BPF_REG_9,
56 	BPF_REG_10,
57 	__MAX_BPF_REG,
58 };
59 
60 /* BPF has 10 general purpose 64-bit registers and a stack frame. */
61 #define MAX_BPF_REG	__MAX_BPF_REG
62 
63 struct bpf_insn {
64 	__u8	code;		/* opcode */
65 	__u8	dst_reg:4;	/* dest register */
66 	__u8	src_reg:4;	/* source register */
67 	__s16	off;		/* signed offset */
68 	__s32	imm;		/* signed immediate constant */
69 };
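/* Editor's note: an illustrative sketch (guarded out, not part of the UAPI)
 * showing how a trivial "return 0" program can be hand-encoded with
 * struct bpf_insn above.  BPF_JMP and BPF_K come from linux/bpf_common.h,
 * included earlier; everything else is defined in this header.
 */
#if 0	/* example only */
static const struct bpf_insn bpf_example_ret0[2] = {
	/* r0 = 0 */
	{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
	  .dst_reg = BPF_REG_0, .src_reg = 0, .off = 0, .imm = 0 },
	/* exit, returning r0 to the caller */
	{ .code = BPF_JMP | BPF_EXIT,
	  .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 },
};
#endif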
70 
71 /* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
72 struct bpf_lpm_trie_key {
73 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
74 	__u8	data[0];	/* Arbitrary size */
75 };
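/* Editor's note: an illustrative sketch (guarded out) of how the flexible key
 * above is typically instantiated for IPv4.  A map using this layout would be
 * created with key_size == sizeof(struct bpf_lpm_trie_key) + 4.
 */
#if 0	/* example only */
struct example_ipv4_lpm_key {
	__u32	prefixlen;	/* e.g. 24 for a /24 prefix */
	__u8	data[4];	/* IPv4 address in network byte order */
};
#endif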
76 
77 /* BPF syscall commands, see bpf(2) man-page for details. */
78 enum bpf_cmd {
79 	BPF_MAP_CREATE,
80 	BPF_MAP_LOOKUP_ELEM,
81 	BPF_MAP_UPDATE_ELEM,
82 	BPF_MAP_DELETE_ELEM,
83 	BPF_MAP_GET_NEXT_KEY,
84 	BPF_PROG_LOAD,
85 	BPF_OBJ_PIN,
86 	BPF_OBJ_GET,
87 	BPF_PROG_ATTACH,
88 	BPF_PROG_DETACH,
89 	BPF_PROG_TEST_RUN,
90 	BPF_PROG_GET_NEXT_ID,
91 	BPF_MAP_GET_NEXT_ID,
92 	BPF_PROG_GET_FD_BY_ID,
93 	BPF_MAP_GET_FD_BY_ID,
94 	BPF_OBJ_GET_INFO_BY_FD,
95 	BPF_PROG_QUERY,
96 };
97 
98 enum bpf_map_type {
99 	BPF_MAP_TYPE_UNSPEC,
100 	BPF_MAP_TYPE_HASH,
101 	BPF_MAP_TYPE_ARRAY,
102 	BPF_MAP_TYPE_PROG_ARRAY,
103 	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
104 	BPF_MAP_TYPE_PERCPU_HASH,
105 	BPF_MAP_TYPE_PERCPU_ARRAY,
106 	BPF_MAP_TYPE_STACK_TRACE,
107 	BPF_MAP_TYPE_CGROUP_ARRAY,
108 	BPF_MAP_TYPE_LRU_HASH,
109 	BPF_MAP_TYPE_LRU_PERCPU_HASH,
110 	BPF_MAP_TYPE_LPM_TRIE,
111 	BPF_MAP_TYPE_ARRAY_OF_MAPS,
112 	BPF_MAP_TYPE_HASH_OF_MAPS,
113 	BPF_MAP_TYPE_DEVMAP,
114 	BPF_MAP_TYPE_SOCKMAP,
115 	BPF_MAP_TYPE_CPUMAP,
116 };
117 
118 enum bpf_prog_type {
119 	BPF_PROG_TYPE_UNSPEC,
120 	BPF_PROG_TYPE_SOCKET_FILTER,
121 	BPF_PROG_TYPE_KPROBE,
122 	BPF_PROG_TYPE_SCHED_CLS,
123 	BPF_PROG_TYPE_SCHED_ACT,
124 	BPF_PROG_TYPE_TRACEPOINT,
125 	BPF_PROG_TYPE_XDP,
126 	BPF_PROG_TYPE_PERF_EVENT,
127 	BPF_PROG_TYPE_CGROUP_SKB,
128 	BPF_PROG_TYPE_CGROUP_SOCK,
129 	BPF_PROG_TYPE_LWT_IN,
130 	BPF_PROG_TYPE_LWT_OUT,
131 	BPF_PROG_TYPE_LWT_XMIT,
132 	BPF_PROG_TYPE_SOCK_OPS,
133 	BPF_PROG_TYPE_SK_SKB,
134 };
135 
136 enum bpf_attach_type {
137 	BPF_CGROUP_INET_INGRESS,
138 	BPF_CGROUP_INET_EGRESS,
139 	BPF_CGROUP_INET_SOCK_CREATE,
140 	BPF_CGROUP_SOCK_OPS,
141 	BPF_SK_SKB_STREAM_PARSER,
142 	BPF_SK_SKB_STREAM_VERDICT,
143 	__MAX_BPF_ATTACH_TYPE
144 };
145 
146 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
147 
148 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
149  *
150  * NONE(default): No further bpf programs allowed in the subtree.
151  *
152  * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
153  * the program in this cgroup yields to sub-cgroup program.
154  *
155  * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
156  * that cgroup program gets run in addition to the program in this cgroup.
157  *
158  * Only one program is allowed to be attached to a cgroup with
159  * NONE or BPF_F_ALLOW_OVERRIDE flag.
160  * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
161  * release the old program and attach the new one. Attach flags have to match.
162  *
163  * Multiple programs are allowed to be attached to a cgroup with
164  * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
165  * (those that were attached first, run first)
166  * The programs of sub-cgroup are executed first, then programs of
167  * this cgroup and then programs of parent cgroup.
168  * When a child program makes a decision (like picking TCP CA or sock bind),
169  * the parent program has a chance to override it.
170  *
171  * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
172  * A cgroup with NONE doesn't allow any programs in sub-cgroups.
173  * Ex1:
174  * cgrp1 (MULTI progs A, B) ->
175  *    cgrp2 (OVERRIDE prog C) ->
176  *      cgrp3 (MULTI prog D) ->
177  *        cgrp4 (OVERRIDE prog E) ->
178  *          cgrp5 (NONE prog F)
179  * the event in cgrp5 triggers execution of F,D,A,B in that order.
180  * if prog F is detached, the execution is E,D,A,B
181  * if prog F and D are detached, the execution is E,A,B
182  * if prog F, E and D are detached, the execution is C,A,B
183  *
184  * All eligible programs are executed regardless of return code from
185  * earlier programs.
186  */
187 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
188 #define BPF_F_ALLOW_MULTI	(1U << 1)
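/* Editor's note: an illustrative, guarded-out userspace sketch of attaching an
 * already-loaded cgroup program with BPF_F_ALLOW_MULTI.  It is written as if in
 * a separate source file that includes <linux/bpf.h>; cgroup_fd and prog_fd are
 * assumed to come from open(2) on the cgroup directory and BPF_PROG_LOAD.
 * union bpf_attr is defined later in this header.
 */
#if 0	/* example only */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int example_attach_multi(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unused fields must be zero */
	attr.target_fd     = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
	attr.attach_flags  = BPF_F_ALLOW_MULTI;

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}
#endif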
189 
190 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
191  * verifier will perform strict alignment checking as if the kernel
192  * has been built with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS not set,
193  * and NET_IP_ALIGN defined to 2.
194  */
195 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
196 
197 #define BPF_PSEUDO_MAP_FD	1
198 
199 /* flags for BPF_MAP_UPDATE_ELEM command */
200 #define BPF_ANY		0 /* create new element or update existing */
201 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
202 #define BPF_EXIST	2 /* update existing element */
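/* Editor's note: an illustrative, guarded-out userspace sketch showing the
 * BPF_NOEXIST flag with BPF_MAP_UPDATE_ELEM (insert-only semantics).  map_fd is
 * assumed to be a map with 4-byte keys and 8-byte values created earlier;
 * union bpf_attr is defined later in this header.
 */
#if 0	/* example only */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int example_insert_only(int map_fd, __u32 key, __u64 value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&key;
	attr.value  = (__u64)(unsigned long)&value;
	attr.flags  = BPF_NOEXIST;	/* fail with EEXIST if key is present */

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
#endif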
203 
204 /* flags for BPF_MAP_CREATE command */
205 #define BPF_F_NO_PREALLOC	(1U << 0)
206 /* Instead of having one common LRU list in the
207  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
208  * which can scale and perform better.
209  * Note, the LRU nodes (including free nodes) cannot be moved
210  * across different LRU lists.
211  */
212 #define BPF_F_NO_COMMON_LRU	(1U << 1)
213 /* Specify numa node during map creation */
214 #define BPF_F_NUMA_NODE		(1U << 2)
215 
216 /* flags for BPF_PROG_QUERY */
217 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
218 
219 #define BPF_OBJ_NAME_LEN 16U
220 
221 union bpf_attr {
222 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
223 		__u32	map_type;	/* one of enum bpf_map_type */
224 		__u32	key_size;	/* size of key in bytes */
225 		__u32	value_size;	/* size of value in bytes */
226 		__u32	max_entries;	/* max number of entries in a map */
227 		__u32	map_flags;	/* BPF_MAP_CREATE related
228 					 * flags defined above.
229 					 */
230 		__u32	inner_map_fd;	/* fd pointing to the inner map */
231 		__u32	numa_node;	/* numa node (effective only if
232 					 * BPF_F_NUMA_NODE is set).
233 					 */
234 		char	map_name[BPF_OBJ_NAME_LEN];
235 	};
236 
237 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
238 		__u32		map_fd;
239 		__aligned_u64	key;
240 		union {
241 			__aligned_u64 value;
242 			__aligned_u64 next_key;
243 		};
244 		__u64		flags;
245 	};
246 
247 	struct { /* anonymous struct used by BPF_PROG_LOAD command */
248 		__u32		prog_type;	/* one of enum bpf_prog_type */
249 		__u32		insn_cnt;
250 		__aligned_u64	insns;
251 		__aligned_u64	license;
252 		__u32		log_level;	/* verbosity level of verifier */
253 		__u32		log_size;	/* size of user buffer */
254 		__aligned_u64	log_buf;	/* user supplied buffer */
255 		__u32		kern_version;	/* checked when prog_type=kprobe */
256 		__u32		prog_flags;
257 		char		prog_name[BPF_OBJ_NAME_LEN];
258 	};
259 
260 	struct { /* anonymous struct used by BPF_OBJ_* commands */
261 		__aligned_u64	pathname;
262 		__u32		bpf_fd;
263 	};
264 
265 	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
266 		__u32		target_fd;	/* container object to attach to */
267 		__u32		attach_bpf_fd;	/* eBPF program to attach */
268 		__u32		attach_type;
269 		__u32		attach_flags;
270 	};
271 
272 	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
273 		__u32		prog_fd;
274 		__u32		retval;
275 		__u32		data_size_in;
276 		__u32		data_size_out;
277 		__aligned_u64	data_in;
278 		__aligned_u64	data_out;
279 		__u32		repeat;
280 		__u32		duration;
281 	} test;
282 
283 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
284 		union {
285 			__u32		start_id;
286 			__u32		prog_id;
287 			__u32		map_id;
288 		};
289 		__u32		next_id;
290 	};
291 
292 	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
293 		__u32		bpf_fd;
294 		__u32		info_len;
295 		__aligned_u64	info;
296 	} info;
297 
298 	struct { /* anonymous struct used by BPF_PROG_QUERY command */
299 		__u32		target_fd;	/* container object to query */
300 		__u32		attach_type;
301 		__u32		query_flags;
302 		__u32		attach_flags;
303 		__aligned_u64	prog_ids;
304 		__u32		prog_cnt;
305 	} query;
306 } __attribute__((aligned(8)));
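/* Editor's note: an illustrative, guarded-out userspace sketch of the
 * BPF_MAP_CREATE command using the anonymous struct above.  There is no glibc
 * wrapper for bpf(2), so the raw syscall is used; unused attr fields must be
 * zeroed.  The function name and parameters are made up for the example.
 */
#if 0	/* example only */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static int example_create_array(__u32 value_size, __u32 max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(__u32);	/* array maps use u32 indices */
	attr.value_size  = value_size;
	attr.max_entries = max_entries;
	strncpy(attr.map_name, "example_arr", BPF_OBJ_NAME_LEN - 1);

	/* on success the return value is a new map file descriptor */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
#endif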
307 
308 /* BPF helper function descriptions:
309  *
310  * void *bpf_map_lookup_elem(&map, &key)
311  *     Return: Map value or NULL
312  *
313  * int bpf_map_update_elem(&map, &key, &value, flags)
314  *     Return: 0 on success or negative error
315  *
316  * int bpf_map_delete_elem(&map, &key)
317  *     Return: 0 on success or negative error
318  *
319  * int bpf_probe_read(void *dst, int size, void *src)
320  *     Return: 0 on success or negative error
321  *
322  * u64 bpf_ktime_get_ns(void)
323  *     Return: current ktime
324  *
325  * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
326  *     Return: length of buffer written or negative error
327  *
328  * u32 bpf_prandom_u32(void)
329  *     Return: random value
330  *
331  * u32 bpf_raw_smp_processor_id(void)
332  *     Return: SMP processor ID
333  *
334  * int bpf_skb_store_bytes(skb, offset, from, len, flags)
335  *     store bytes into packet
336  *     @skb: pointer to skb
337  *     @offset: offset within packet from skb->mac_header
338  *     @from: pointer where to copy bytes from
339  *     @len: number of bytes to store into packet
340  *     @flags: bit 0 - if true, recompute skb->csum
341  *             other bits - reserved
342  *     Return: 0 on success or negative error
343  *
344  * int bpf_l3_csum_replace(skb, offset, from, to, flags)
345  *     recompute IP checksum
346  *     @skb: pointer to skb
347  *     @offset: offset within packet where IP checksum is located
348  *     @from: old value of header field
349  *     @to: new value of header field
350  *     @flags: bits 0-3 - size of header field
351  *             other bits - reserved
352  *     Return: 0 on success or negative error
353  *
354  * int bpf_l4_csum_replace(skb, offset, from, to, flags)
355  *     recompute TCP/UDP checksum
356  *     @skb: pointer to skb
357  *     @offset: offset within packet where TCP/UDP checksum is located
358  *     @from: old value of header field
359  *     @to: new value of header field
360  *     @flags: bits 0-3 - size of header field
361  *             bit 4 - is pseudo header
362  *             other bits - reserved
363  *     Return: 0 on success or negative error
364  *
365  * int bpf_tail_call(ctx, prog_array_map, index)
366  *     jump into another BPF program
367  *     @ctx: context pointer passed to next program
368  *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
369  *     @index: index inside array that selects specific program to run
370  *     Return: 0 on success or negative error
371  *
372  * int bpf_clone_redirect(skb, ifindex, flags)
373  *     redirect to another netdev
374  *     @skb: pointer to skb
375  *     @ifindex: ifindex of the net device
376  *     @flags: bit 0 - if set, redirect to ingress instead of egress
377  *             other bits - reserved
378  *     Return: 0 on success or negative error
379  *
380  * u64 bpf_get_current_pid_tgid(void)
381  *     Return: current->tgid << 32 | current->pid
382  *
383  * u64 bpf_get_current_uid_gid(void)
384  *     Return: current_gid << 32 | current_uid
385  *
386  * int bpf_get_current_comm(char *buf, int size_of_buf)
387  *     stores current->comm into buf
388  *     Return: 0 on success or negative error
389  *
390  * u32 bpf_get_cgroup_classid(skb)
391  *     retrieve a proc's classid
392  *     @skb: pointer to skb
393  *     Return: classid if != 0
394  *
395  * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
396  *     Return: 0 on success or negative error
397  *
398  * int bpf_skb_vlan_pop(skb)
399  *     Return: 0 on success or negative error
400  *
401  * int bpf_skb_get_tunnel_key(skb, key, size, flags)
402  * int bpf_skb_set_tunnel_key(skb, key, size, flags)
403  *     retrieve or populate tunnel metadata
404  *     @skb: pointer to skb
405  *     @key: pointer to 'struct bpf_tunnel_key'
406  *     @size: size of 'struct bpf_tunnel_key'
407  *     @flags: room for future extensions
408  *     Return: 0 on success or negative error
409  *
410  * u64 bpf_perf_event_read(map, flags)
411  *     read perf event counter value
412  *     @map: pointer to perf_event_array map
413  *     @flags: index of event in the map or bitmask flags
414  *     Return: value of perf event counter read or error code
415  *
416  * int bpf_redirect(ifindex, flags)
417  *     redirect to another netdev
418  *     @ifindex: ifindex of the net device
419  *     @flags:
420  *	  cls_bpf:
421  *          bit 0 - if set, redirect to ingress instead of egress
422  *          other bits - reserved
423  *	  xdp_bpf:
424  *	    all bits - reserved
425  *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
426  *	       xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
 *
427  * int bpf_redirect_map(map, key, flags)
428  *     redirect to endpoint in map
429  *     @map: pointer to dev map
430  *     @key: index in map to lookup
431  *     @flags: --
432  *     Return: XDP_REDIRECT on success or XDP_ABORTED on error
433  *
434  * u32 bpf_get_route_realm(skb)
435  *     retrieve a dst's tclassid
436  *     @skb: pointer to skb
437  *     Return: realm if != 0
438  *
439  * int bpf_perf_event_output(ctx, map, flags, data, size)
440  *     output perf raw sample
441  *     @ctx: struct pt_regs*
442  *     @map: pointer to perf_event_array map
443  *     @flags: index of event in the map or bitmask flags
444  *     @data: data on stack to be output as raw data
445  *     @size: size of data
446  *     Return: 0 on success or negative error
447  *
448  * int bpf_get_stackid(ctx, map, flags)
449  *     walk user or kernel stack and return id
450  *     @ctx: struct pt_regs*
451  *     @map: pointer to stack_trace map
452  *     @flags: bits 0-7 - number of stack frames to skip
453  *             bit 8 - collect user stack instead of kernel
454  *             bit 9 - compare stacks by hash only
455  *             bit 10 - if two different stacks hash into the same stackid
456  *                      discard old
457  *             other bits - reserved
458  *     Return: >= 0 stackid on success or negative error
459  *
460  * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
461  *     calculate csum diff
462  *     @from: raw from buffer
463  *     @from_size: length of from buffer
464  *     @to: raw to buffer
465  *     @to_size: length of to buffer
466  *     @seed: optional seed
467  *     Return: csum result or negative error code
468  *
469  * int bpf_skb_get_tunnel_opt(skb, opt, size)
470  *     retrieve tunnel options metadata
471  *     @skb: pointer to skb
472  *     @opt: pointer to raw tunnel option data
473  *     @size: size of @opt
474  *     Return: option size
475  *
476  * int bpf_skb_set_tunnel_opt(skb, opt, size)
477  *     populate tunnel options metadata
478  *     @skb: pointer to skb
479  *     @opt: pointer to raw tunnel option data
480  *     @size: size of @opt
481  *     Return: 0 on success or negative error
482  *
483  * int bpf_skb_change_proto(skb, proto, flags)
484  *     Change protocol of the skb. Currently supported is v4 -> v6,
485  *     v6 -> v4 transitions. The helper will also resize the skb. eBPF
486  *     program is expected to fill the new headers via skb_store_bytes
487  *     and lX_csum_replace.
488  *     @skb: pointer to skb
489  *     @proto: new skb->protocol type
490  *     @flags: reserved
491  *     Return: 0 on success or negative error
492  *
493  * int bpf_skb_change_type(skb, type)
494  *     Change packet type of skb.
495  *     @skb: pointer to skb
496  *     @type: new skb->pkt_type type
497  *     Return: 0 on success or negative error
498  *
499  * int bpf_skb_under_cgroup(skb, map, index)
500  *     Check cgroup2 membership of skb
501  *     @skb: pointer to skb
502  *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
503  *     @index: index of the cgroup in the bpf_map
504  *     Return:
505  *       == 0 skb failed the cgroup2 descendant test
506  *       == 1 skb succeeded the cgroup2 descendant test
507  *        < 0 error
508  *
509  * u32 bpf_get_hash_recalc(skb)
510  *     Retrieve and possibly recalculate skb->hash.
511  *     @skb: pointer to skb
512  *     Return: hash
513  *
514  * u64 bpf_get_current_task(void)
515  *     Returns current task_struct
516  *     Return: current
517  *
518  * int bpf_probe_write_user(void *dst, void *src, int len)
519  *     safely attempt to write to a location
520  *     @dst: destination address in userspace
521  *     @src: source address on stack
522  *     @len: number of bytes to copy
523  *     Return: 0 on success or negative error
524  *
525  * int bpf_current_task_under_cgroup(map, index)
526  *     Check cgroup2 membership of current task
527  *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
528  *     @index: index of the cgroup in the bpf_map
529  *     Return:
530  *       == 0 current failed the cgroup2 descendant test
531  *       == 1 current succeeded the cgroup2 descendant test
532  *        < 0 error
533  *
534  * int bpf_skb_change_tail(skb, len, flags)
535  *     The helper will resize the skb to the given new size, to be used e.g.
536  *     with control messages.
537  *     @skb: pointer to skb
538  *     @len: new skb length
539  *     @flags: reserved
540  *     Return: 0 on success or negative error
541  *
542  * int bpf_skb_pull_data(skb, len)
543  *     The helper will pull in non-linear data in case the skb is non-linear
544  *     and not all of len are part of the linear section. Only needed for
545  *     read/write with direct packet access.
546  *     @skb: pointer to skb
547  *     @len: len to make read/writeable
548  *     Return: 0 on success or negative error
549  *
550  * s64 bpf_csum_update(skb, csum)
551  *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
552  *     @skb: pointer to skb
553  *     @csum: csum to add
554  *     Return: csum on success or negative error
555  *
556  * void bpf_set_hash_invalid(skb)
557  *     Invalidate current skb->hash.
558  *     @skb: pointer to skb
559  *
560  * int bpf_get_numa_node_id()
561  *     Return: Id of current NUMA node.
562  *
563  * int bpf_skb_change_head(skb, len, flags)
564  *     Grows headroom of skb and adjusts MAC header offset accordingly.
565  *     Will extend/reallocate as required automatically.
566  *     May change skb data pointer and will thus invalidate any check
567  *     performed for direct packet access.
568  *     @skb: pointer to skb
569  *     @len: length of header to be pushed in front
570  *     @flags: Flags (unused for now)
571  *     Return: 0 on success or negative error
572  *
573  * int bpf_xdp_adjust_head(xdp_md, delta)
574  *     Adjust the xdp_md.data by delta
575  *     @xdp_md: pointer to xdp_md
576  *     @delta: A positive/negative integer to be added to xdp_md.data
577  *     Return: 0 on success or negative on error
578  *
579  * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
580  *     Copy a NUL terminated string from unsafe address. In case the string
581  *     length is smaller than size, the target is not padded with further NUL
582  *     bytes. In case the string length is larger than size, just size-1
583  *     bytes are copied and the last byte is set to NUL.
584  *     @dst: destination address
585  *     @size: maximum number of bytes to copy, including the trailing NUL
586  *     @unsafe_ptr: unsafe address
587  *     Return:
588  *       > 0 length of the string including the trailing NUL on success
589  *       < 0 error
590  *
591  * u64 bpf_get_socket_cookie(skb)
592  *     Get the cookie for the socket stored inside sk_buff.
593  *     @skb: pointer to skb
594  *     Return: an 8-byte non-decreasing number on success or 0 if the socket
595  *     field is missing inside sk_buff
596  *
597  * u32 bpf_get_socket_uid(skb)
598  *     Get the owner uid of the socket stored inside sk_buff.
599  *     @skb: pointer to skb
600  *     Return: uid of the socket owner on success or overflowuid if failed.
601  *
602  * u32 bpf_set_hash(skb, hash)
603  *     Set full skb->hash.
604  *     @skb: pointer to skb
605  *     @hash: hash to set
606  *
607  * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
608  *     Calls setsockopt. Not all opts are available, only those with
609  *     integer optvals plus TCP_CONGESTION.
610  *     Supported levels: SOL_SOCKET and IPPROTO_TCP
611  *     @bpf_socket: pointer to bpf_socket
612  *     @level: SOL_SOCKET or IPPROTO_TCP
613  *     @optname: option name
614  *     @optval: pointer to option value
615  *     @optlen: length of optval in bytes
616  *     Return: 0 or negative error
617  *
618  * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
619  *     Grow or shrink room in sk_buff.
620  *     @skb: pointer to skb
621  *     @len_diff: (signed) amount of room to grow/shrink
622  *     @mode: operation mode (enum bpf_adj_room_mode)
623  *     @flags: reserved for future use
624  *     Return: 0 on success or negative error code
625  *
626  * int bpf_sk_redirect_map(map, key, flags)
627  *     Redirect skb to a sock in map using key as a lookup key for the
628  *     sock in map.
629  *     @map: pointer to sockmap
630  *     @key: key to lookup sock in map
631  *     @flags: reserved for future use
632  *     Return: SK_REDIRECT
633  *
634  * int bpf_sock_map_update(skops, map, key, flags)
635  *	@skops: pointer to bpf_sock_ops
636  *	@map: pointer to sockmap to update
637  *	@key: key to insert/update sock in map
638  *	@flags: same flags as map update elem
 *	Return: 0 on success or negative error
639  *
640  * int bpf_xdp_adjust_meta(xdp_md, delta)
641  *     Adjust the xdp_md.data_meta by delta
642  *     @xdp_md: pointer to xdp_md
643  *     @delta: A positive/negative integer to be added to xdp_md.data_meta
644  *     Return: 0 on success or negative on error
645  */
646 #define __BPF_FUNC_MAPPER(FN)		\
647 	FN(unspec),			\
648 	FN(map_lookup_elem),		\
649 	FN(map_update_elem),		\
650 	FN(map_delete_elem),		\
651 	FN(probe_read),			\
652 	FN(ktime_get_ns),		\
653 	FN(trace_printk),		\
654 	FN(get_prandom_u32),		\
655 	FN(get_smp_processor_id),	\
656 	FN(skb_store_bytes),		\
657 	FN(l3_csum_replace),		\
658 	FN(l4_csum_replace),		\
659 	FN(tail_call),			\
660 	FN(clone_redirect),		\
661 	FN(get_current_pid_tgid),	\
662 	FN(get_current_uid_gid),	\
663 	FN(get_current_comm),		\
664 	FN(get_cgroup_classid),		\
665 	FN(skb_vlan_push),		\
666 	FN(skb_vlan_pop),		\
667 	FN(skb_get_tunnel_key),		\
668 	FN(skb_set_tunnel_key),		\
669 	FN(perf_event_read),		\
670 	FN(redirect),			\
671 	FN(get_route_realm),		\
672 	FN(perf_event_output),		\
673 	FN(skb_load_bytes),		\
674 	FN(get_stackid),		\
675 	FN(csum_diff),			\
676 	FN(skb_get_tunnel_opt),		\
677 	FN(skb_set_tunnel_opt),		\
678 	FN(skb_change_proto),		\
679 	FN(skb_change_type),		\
680 	FN(skb_under_cgroup),		\
681 	FN(get_hash_recalc),		\
682 	FN(get_current_task),		\
683 	FN(probe_write_user),		\
684 	FN(current_task_under_cgroup),	\
685 	FN(skb_change_tail),		\
686 	FN(skb_pull_data),		\
687 	FN(csum_update),		\
688 	FN(set_hash_invalid),		\
689 	FN(get_numa_node_id),		\
690 	FN(skb_change_head),		\
691 	FN(xdp_adjust_head),		\
692 	FN(probe_read_str),		\
693 	FN(get_socket_cookie),		\
694 	FN(get_socket_uid),		\
695 	FN(set_hash),			\
696 	FN(setsockopt),			\
697 	FN(skb_adjust_room),		\
698 	FN(redirect_map),		\
699 	FN(sk_redirect_map),		\
700 	FN(sock_map_update),		\
701 	FN(xdp_adjust_meta),		\
702 	FN(perf_event_read_value),	\
703 	FN(perf_prog_read_value),
704 
705 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
706  * function eBPF program intends to call
707  */
708 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
709 enum bpf_func_id {
710 	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
711 	__BPF_FUNC_MAX_ID,
712 };
713 #undef __BPF_ENUM_FN
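/* Editor's note: an illustrative, guarded-out sketch of how BPF C programs
 * conventionally bind helper ids to callable pointers (the pattern used by
 * samples/bpf/bpf_helpers.h).  The compiler emits a BPF_CALL whose imm field
 * carries the BPF_FUNC_* value, which the kernel resolves at load time.  The
 * exact prototypes below are the example's assumption, not part of this UAPI.
 */
#if 0	/* example only */
static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
	(void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, const void *key,
				  const void *value, __u64 flags) =
	(void *) BPF_FUNC_map_update_elem;
static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
	(void *) BPF_FUNC_trace_printk;
#endif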
714 
715 /* All flags used by eBPF helper functions, placed here. */
716 
717 /* BPF_FUNC_skb_store_bytes flags. */
718 #define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
719 #define BPF_F_INVALIDATE_HASH		(1ULL << 1)
720 
721 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
722  * First 4 bits are for passing the header field size.
723  */
724 #define BPF_F_HDR_FIELD_MASK		0xfULL
725 
726 /* BPF_FUNC_l4_csum_replace flags. */
727 #define BPF_F_PSEUDO_HDR		(1ULL << 4)
728 #define BPF_F_MARK_MANGLED_0		(1ULL << 5)
729 #define BPF_F_MARK_ENFORCE		(1ULL << 6)
730 
731 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
732 #define BPF_F_INGRESS			(1ULL << 0)
733 
734 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
735 #define BPF_F_TUNINFO_IPV6		(1ULL << 0)
736 
737 /* BPF_FUNC_get_stackid flags. */
738 #define BPF_F_SKIP_FIELD_MASK		0xffULL
739 #define BPF_F_USER_STACK		(1ULL << 8)
740 #define BPF_F_FAST_STACK_CMP		(1ULL << 9)
741 #define BPF_F_REUSE_STACKID		(1ULL << 10)
742 
743 /* BPF_FUNC_skb_set_tunnel_key flags. */
744 #define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
745 #define BPF_F_DONT_FRAGMENT		(1ULL << 2)
746 
747 /* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
748 #define BPF_F_INDEX_MASK		0xffffffffULL
749 #define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
750 /* BPF_FUNC_perf_event_output for sk_buff input context. */
751 #define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
752 
753 /* Mode for BPF_FUNC_skb_adjust_room helper. */
754 enum bpf_adj_room_mode {
755 	BPF_ADJ_ROOM_NET,
756 };
757 
758 /* user accessible mirror of in-kernel sk_buff.
759  * new fields can only be added to the end of this structure
760  */
761 struct __sk_buff {
762 	__u32 len;
763 	__u32 pkt_type;
764 	__u32 mark;
765 	__u32 queue_mapping;
766 	__u32 protocol;
767 	__u32 vlan_present;
768 	__u32 vlan_tci;
769 	__u32 vlan_proto;
770 	__u32 priority;
771 	__u32 ingress_ifindex;
772 	__u32 ifindex;
773 	__u32 tc_index;
774 	__u32 cb[5];
775 	__u32 hash;
776 	__u32 tc_classid;
777 	__u32 data;
778 	__u32 data_end;
779 	__u32 napi_id;
780 
781 	/* Accessed by BPF_PROG_TYPE_SK_SKB types from here to ... */
782 	__u32 family;
783 	__u32 remote_ip4;	/* Stored in network byte order */
784 	__u32 local_ip4;	/* Stored in network byte order */
785 	__u32 remote_ip6[4];	/* Stored in network byte order */
786 	__u32 local_ip6[4];	/* Stored in network byte order */
787 	__u32 remote_port;	/* Stored in network byte order */
788 	__u32 local_port;	/* stored in host byte order */
789 	/* ... here. */
790 
791 	__u32 data_meta;
792 };
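/* Editor's note: an illustrative, guarded-out sketch of the bounds-checked
 * direct packet read that SCHED_CLS/socket filter programs perform on the
 * data/data_end mirrors above.  The casts and the explicit length check are
 * what the verifier expects before any dereference.
 */
#if 0	/* example only */
static int example_is_ipv4(struct __sk_buff *skb)
{
	__u8 *data     = (__u8 *)(unsigned long)skb->data;
	__u8 *data_end = (__u8 *)(unsigned long)skb->data_end;

	if (data + 14 > data_end)	/* need a full Ethernet header */
		return 0;
	return data[12] == 0x08 && data[13] == 0x00;	/* ETH_P_IP */
}
#endif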
793 
794 struct bpf_tunnel_key {
795 	__u32 tunnel_id;
796 	union {
797 		__u32 remote_ipv4;
798 		__u32 remote_ipv6[4];
799 	};
800 	__u8 tunnel_tos;
801 	__u8 tunnel_ttl;
802 	__u16 tunnel_ext;
803 	__u32 tunnel_label;
804 };
805 
806 /* Generic BPF return codes which all BPF program types may support.
807  * The values are binary compatible with their TC_ACT_* counter-part to
808  * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
809  * programs.
810  *
811  * XDP is handled separately, see XDP_*.
812  */
813 enum bpf_ret_code {
814 	BPF_OK = 0,
815 	/* 1 reserved */
816 	BPF_DROP = 2,
817 	/* 3-6 reserved */
818 	BPF_REDIRECT = 7,
819 	/* >127 are reserved for prog type specific return codes */
820 };
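/* Editor's note: an illustrative, guarded-out sketch of an LWT-style program
 * using the generic return codes above: pass IPv4 packets and drop everything
 * else.  For LWT programs skb->data is assumed to start at the IP header.
 */
#if 0	/* example only */
static int example_lwt_in(struct __sk_buff *skb)
{
	__u8 *data     = (__u8 *)(unsigned long)skb->data;
	__u8 *data_end = (__u8 *)(unsigned long)skb->data_end;

	if (data + 1 > data_end)
		return BPF_DROP;
	return (data[0] >> 4) == 4 ? BPF_OK : BPF_DROP;
}
#endif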
821 
822 struct bpf_sock {
823 	__u32 bound_dev_if;
824 	__u32 family;
825 	__u32 type;
826 	__u32 protocol;
827 	__u32 mark;
828 	__u32 priority;
829 };
830 
831 #define XDP_PACKET_HEADROOM 256
832 
833 /* User return codes for XDP prog type.
834  * A valid XDP program must return one of these defined values. All other
835  * return codes are reserved for future use. Unknown return codes will
836  * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
837  */
838 enum xdp_action {
839 	XDP_ABORTED = 0,
840 	XDP_DROP,
841 	XDP_PASS,
842 	XDP_TX,
843 	XDP_REDIRECT,
844 };
845 
846 /* user accessible metadata for XDP packet hook
847  * new fields must be added to the end of this structure
848  */
849 struct xdp_md {
850 	__u32 data;
851 	__u32 data_end;
852 	__u32 data_meta;
853 };
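/* Editor's note: an illustrative, guarded-out sketch of a minimal XDP program
 * built on the metadata struct above: drop frames too short to hold an
 * Ethernet header, pass everything else.
 */
#if 0	/* example only */
static int example_xdp_prog(struct xdp_md *ctx)
{
	__u8 *data     = (__u8 *)(unsigned long)ctx->data;
	__u8 *data_end = (__u8 *)(unsigned long)ctx->data_end;

	if (data + 14 > data_end)
		return XDP_DROP;
	return XDP_PASS;
}
#endif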
854 
855 enum sk_action {
856 	SK_ABORTED = 0,
857 	SK_DROP,
858 	SK_REDIRECT,
859 };
860 
861 #define BPF_TAG_SIZE	8
862 
863 struct bpf_prog_info {
864 	__u32 type;
865 	__u32 id;
866 	__u8  tag[BPF_TAG_SIZE];
867 	__u32 jited_prog_len;
868 	__u32 xlated_prog_len;
869 	__aligned_u64 jited_prog_insns;
870 	__aligned_u64 xlated_prog_insns;
871 	__u64 load_time;	/* ns since boottime */
872 	__u32 created_by_uid;
873 	__u32 nr_map_ids;
874 	__aligned_u64 map_ids;
875 	char  name[BPF_OBJ_NAME_LEN];
876 } __attribute__((aligned(8)));
877 
878 struct bpf_map_info {
879 	__u32 type;
880 	__u32 id;
881 	__u32 key_size;
882 	__u32 value_size;
883 	__u32 max_entries;
884 	__u32 map_flags;
885 	char  name[BPF_OBJ_NAME_LEN];
886 } __attribute__((aligned(8)));
887 
888 /* User bpf_sock_ops struct to access socket values and specify request ops
889  * and their replies.
890  * Some of these fields are in network (big-endian) byte order and may need
891  * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
892  * New fields can only be added at the end of this structure
893  */
894 struct bpf_sock_ops {
895 	__u32 op;
896 	union {
897 		__u32 reply;
898 		__u32 replylong[4];
899 	};
900 	__u32 family;
901 	__u32 remote_ip4;	/* Stored in network byte order */
902 	__u32 local_ip4;	/* Stored in network byte order */
903 	__u32 remote_ip6[4];	/* Stored in network byte order */
904 	__u32 local_ip6[4];	/* Stored in network byte order */
905 	__u32 remote_port;	/* Stored in network byte order */
906 	__u32 local_port;	/* stored in host byte order */
907 };
908 
909 /* List of known BPF sock_ops operators.
910  * New entries can only be added at the end
911  */
912 enum {
913 	BPF_SOCK_OPS_VOID,
914 	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
915 					 * -1 if default value should be used
916 					 */
917 	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
918 					 * window (in packets) or -1 if default
919 					 * value should be used
920 					 */
921 	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
922 					 * active connection is initialized
923 					 */
924 	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
925 						 * active connection is
926 						 * established
927 						 */
928 	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
929 						 * passive connection is
930 						 * established
931 						 */
932 	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
933 					 * needs ECN
934 					 */
935 };
936 
937 #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
938 #define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
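/* Editor's note: an illustrative, guarded-out sketch of a sock_ops program
 * using the socket-level options above: when an active connection is
 * established, set the initial congestion window via bpf_setsockopt().  The
 * helper binding follows the pattern shown earlier in this file; IPPROTO_TCP
 * (6) is hard-coded to keep the sketch self-contained.
 */
#if 0	/* example only */
static int (*bpf_setsockopt)(void *ctx, int level, int optname,
			     void *optval, int optlen) =
	(void *) BPF_FUNC_setsockopt;

static int example_sockops(struct bpf_sock_ops *skops)
{
	int iw = 10;

	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_setsockopt(skops, 6 /* IPPROTO_TCP */, TCP_BPF_IW,
			       &iw, sizeof(iw));
	return 1;
}
#endif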
939 
940 #endif /* _UAPI__LINUX_BPF_H__ */
941