1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  */
7 #ifndef _UAPI__LINUX_BPF_H__
8 #define _UAPI__LINUX_BPF_H__
9 
10 #include <linux/types.h>
11 #include <linux/bpf_common.h>
12 
13 /* Extended instruction set based on top of classic BPF */
14 
15 /* instruction classes */
16 #define BPF_ALU64	0x07	/* alu mode in double word width */
17 
18 /* ld/ldx fields */
19 #define BPF_DW		0x18	/* double word */
20 #define BPF_XADD	0xc0	/* exclusive add */
21 
22 /* alu/jmp fields */
23 #define BPF_MOV		0xb0	/* mov reg to reg */
24 #define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */
25 
26 /* change endianness of a register */
27 #define BPF_END		0xd0	/* flags for endianness conversion: */
28 #define BPF_TO_LE	0x00	/* convert to little-endian */
29 #define BPF_TO_BE	0x08	/* convert to big-endian */
30 #define BPF_FROM_LE	BPF_TO_LE
31 #define BPF_FROM_BE	BPF_TO_BE
32 
33 /* jmp encodings */
34 #define BPF_JNE		0x50	/* jump != */
35 #define BPF_JLT		0xa0	/* LT is unsigned, '<' */
36 #define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
37 #define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
38 #define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
39 #define BPF_JSLT	0xc0	/* SLT is signed, '<' */
40 #define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
41 #define BPF_CALL	0x80	/* function call */
42 #define BPF_EXIT	0x90	/* function return */
43 
44 /* Register numbers */
45 enum {
46 	BPF_REG_0 = 0,
47 	BPF_REG_1,
48 	BPF_REG_2,
49 	BPF_REG_3,
50 	BPF_REG_4,
51 	BPF_REG_5,
52 	BPF_REG_6,
53 	BPF_REG_7,
54 	BPF_REG_8,
55 	BPF_REG_9,
56 	BPF_REG_10,
57 	__MAX_BPF_REG,
58 };
59 
60 /* BPF has 10 general purpose 64-bit registers and a stack frame. */
61 #define MAX_BPF_REG	__MAX_BPF_REG
62 
63 struct bpf_insn {
64 	__u8	code;		/* opcode */
65 	__u8	dst_reg:4;	/* dest register */
66 	__u8	src_reg:4;	/* source register */
67 	__s16	off;		/* signed offset */
68 	__s32	imm;		/* signed immediate constant */
69 };
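
/* Example (illustrative only, not part of the UAPI): hand-encoding the two
 * instructions "r0 = 0; exit" with this layout. Helper macros such as
 * BPF_MOV64_IMM exist elsewhere in the kernel tree, but plain designated
 * initializers work just as well:
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 */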
70 
71 /* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
72 struct bpf_lpm_trie_key {
73 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
74 	__u8	data[0];	/* Arbitrary size */
75 };
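
/* Example (illustrative only, not part of the UAPI): a lookup key for the
 * IPv4 prefix 192.168.0.0/24. The caller provides prefixlen plus the
 * address bytes, in network byte order, in one contiguous buffer:
 *
 *	struct {
 *		struct bpf_lpm_trie_key key;
 *		__u8 ipv4[4];
 *	} lpm = {
 *		.key.prefixlen = 24,
 *		.ipv4 = { 192, 168, 0, 0 },
 *	};
 */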
76 
77 /* BPF syscall commands, see bpf(2) man-page for details. */
78 enum bpf_cmd {
79 	BPF_MAP_CREATE,
80 	BPF_MAP_LOOKUP_ELEM,
81 	BPF_MAP_UPDATE_ELEM,
82 	BPF_MAP_DELETE_ELEM,
83 	BPF_MAP_GET_NEXT_KEY,
84 	BPF_PROG_LOAD,
85 	BPF_OBJ_PIN,
86 	BPF_OBJ_GET,
87 	BPF_PROG_ATTACH,
88 	BPF_PROG_DETACH,
89 	BPF_PROG_TEST_RUN,
90 	BPF_PROG_GET_NEXT_ID,
91 	BPF_MAP_GET_NEXT_ID,
92 	BPF_PROG_GET_FD_BY_ID,
93 	BPF_MAP_GET_FD_BY_ID,
94 	BPF_OBJ_GET_INFO_BY_FD,
95 	BPF_PROG_QUERY,
96 };
97 
98 enum bpf_map_type {
99 	BPF_MAP_TYPE_UNSPEC,
100 	BPF_MAP_TYPE_HASH,
101 	BPF_MAP_TYPE_ARRAY,
102 	BPF_MAP_TYPE_PROG_ARRAY,
103 	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
104 	BPF_MAP_TYPE_PERCPU_HASH,
105 	BPF_MAP_TYPE_PERCPU_ARRAY,
106 	BPF_MAP_TYPE_STACK_TRACE,
107 	BPF_MAP_TYPE_CGROUP_ARRAY,
108 	BPF_MAP_TYPE_LRU_HASH,
109 	BPF_MAP_TYPE_LRU_PERCPU_HASH,
110 	BPF_MAP_TYPE_LPM_TRIE,
111 	BPF_MAP_TYPE_ARRAY_OF_MAPS,
112 	BPF_MAP_TYPE_HASH_OF_MAPS,
113 	BPF_MAP_TYPE_DEVMAP,
114 	BPF_MAP_TYPE_SOCKMAP,
115 };
116 
117 enum bpf_prog_type {
118 	BPF_PROG_TYPE_UNSPEC,
119 	BPF_PROG_TYPE_SOCKET_FILTER,
120 	BPF_PROG_TYPE_KPROBE,
121 	BPF_PROG_TYPE_SCHED_CLS,
122 	BPF_PROG_TYPE_SCHED_ACT,
123 	BPF_PROG_TYPE_TRACEPOINT,
124 	BPF_PROG_TYPE_XDP,
125 	BPF_PROG_TYPE_PERF_EVENT,
126 	BPF_PROG_TYPE_CGROUP_SKB,
127 	BPF_PROG_TYPE_CGROUP_SOCK,
128 	BPF_PROG_TYPE_LWT_IN,
129 	BPF_PROG_TYPE_LWT_OUT,
130 	BPF_PROG_TYPE_LWT_XMIT,
131 	BPF_PROG_TYPE_SOCK_OPS,
132 	BPF_PROG_TYPE_SK_SKB,
133 };
134 
135 enum bpf_attach_type {
136 	BPF_CGROUP_INET_INGRESS,
137 	BPF_CGROUP_INET_EGRESS,
138 	BPF_CGROUP_INET_SOCK_CREATE,
139 	BPF_CGROUP_SOCK_OPS,
140 	BPF_SK_SKB_STREAM_PARSER,
141 	BPF_SK_SKB_STREAM_VERDICT,
142 	__MAX_BPF_ATTACH_TYPE
143 };
144 
145 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
146 
147 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
148  *
149  * NONE(default): No further bpf programs allowed in the subtree.
150  *
151  * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
152  * the program in this cgroup yields to sub-cgroup program.
153  *
154  * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
155  * that cgroup program gets run in addition to the program in this cgroup.
156  *
157  * Only one program is allowed to be attached to a cgroup with
158  * NONE or BPF_F_ALLOW_OVERRIDE flag.
159  * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
160  * release the old program and attach the new one. Attach flags have to match.
161  *
162  * Multiple programs are allowed to be attached to a cgroup with
163  * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
164  * (those that were attached first run first).
165  * The programs of the sub-cgroup are executed first, then the programs of
166  * this cgroup, and then the programs of the parent cgroup.
167  * When a child program makes a decision (like picking TCP CA or sock bind),
168  * the parent program has a chance to override it.
169  *
170  * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
171  * A cgroup with NONE doesn't allow any programs in sub-cgroups.
172  * Ex1:
173  * cgrp1 (MULTI progs A, B) ->
174  *    cgrp2 (OVERRIDE prog C) ->
175  *      cgrp3 (MULTI prog D) ->
176  *        cgrp4 (OVERRIDE prog E) ->
177  *          cgrp5 (NONE prog F)
178  * the event in cgrp5 triggers execution of F,D,A,B in that order.
179  * if prog F is detached, the execution is E,D,A,B
180  * if prog F and D are detached, the execution is E,A,B
181  * if prog F, E and D are detached, the execution is C,A,B
182  *
183  * All eligible programs are executed regardless of return code from
184  * earlier programs.
185  */
186 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
187 #define BPF_F_ALLOW_MULTI	(1U << 1)
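
/* Example (illustrative only, not part of the UAPI): attaching a program to
 * a cgroup with BPF_F_ALLOW_MULTI so that sub-cgroups may attach programs of
 * their own. cgroup_fd (an fd for the cgroup directory) and prog_fd are
 * assumed to be valid; error handling is omitted:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */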
188 
189 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
190  * verifier will perform strict alignment checking as if the kernel
191  * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
192  * and NET_IP_ALIGN defined to 2.
193  */
194 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
195 
196 #define BPF_PSEUDO_MAP_FD	1
197 
198 /* flags for BPF_MAP_UPDATE_ELEM command */
199 #define BPF_ANY		0 /* create new element or update existing */
200 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
201 #define BPF_EXIST	2 /* update existing element */
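
/* Example (illustrative only, not part of the UAPI): using BPF_NOEXIST to
 * insert a key only when it is not already present; the syscall fails with
 * EEXIST otherwise. map_fd, key and value are assumed to be valid:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	attr.flags  = BPF_NOEXIST;
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */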
202 
203 /* flags for BPF_MAP_CREATE command */
204 #define BPF_F_NO_PREALLOC	(1U << 0)
205 /* Instead of having one common LRU list in the
206  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
207  * which can scale and perform better.
208  * Note, the LRU nodes (including free nodes) cannot be moved
209  * across different LRU lists.
210  */
211 #define BPF_F_NO_COMMON_LRU	(1U << 1)
212 /* Specify numa node during map creation */
213 #define BPF_F_NUMA_NODE		(1U << 2)
214 
215 /* flags for BPF_PROG_QUERY */
216 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
217 
218 #define BPF_OBJ_NAME_LEN 16U
219 
220 union bpf_attr {
221 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
222 		__u32	map_type;	/* one of enum bpf_map_type */
223 		__u32	key_size;	/* size of key in bytes */
224 		__u32	value_size;	/* size of value in bytes */
225 		__u32	max_entries;	/* max number of entries in a map */
226 		__u32	map_flags;	/* BPF_MAP_CREATE related
227 					 * flags defined above.
228 					 */
229 		__u32	inner_map_fd;	/* fd pointing to the inner map */
230 		__u32	numa_node;	/* numa node (effective only if
231 					 * BPF_F_NUMA_NODE is set).
232 					 */
233 		__u8	map_name[BPF_OBJ_NAME_LEN];
234 	};
235 
236 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
237 		__u32		map_fd;
238 		__aligned_u64	key;
239 		union {
240 			__aligned_u64 value;
241 			__aligned_u64 next_key;
242 		};
243 		__u64		flags;
244 	};
245 
246 	struct { /* anonymous struct used by BPF_PROG_LOAD command */
247 		__u32		prog_type;	/* one of enum bpf_prog_type */
248 		__u32		insn_cnt;
249 		__aligned_u64	insns;
250 		__aligned_u64	license;
251 		__u32		log_level;	/* verbosity level of verifier */
252 		__u32		log_size;	/* size of user buffer */
253 		__aligned_u64	log_buf;	/* user supplied buffer */
254 		__u32		kern_version;	/* checked when prog_type=kprobe */
255 		__u32		prog_flags;
256 		__u8		prog_name[BPF_OBJ_NAME_LEN];
257 	};
258 
259 	struct { /* anonymous struct used by BPF_OBJ_* commands */
260 		__aligned_u64	pathname;
261 		__u32		bpf_fd;
262 	};
263 
264 	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
265 		__u32		target_fd;	/* container object to attach to */
266 		__u32		attach_bpf_fd;	/* eBPF program to attach */
267 		__u32		attach_type;
268 		__u32		attach_flags;
269 	};
270 
271 	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
272 		__u32		prog_fd;
273 		__u32		retval;
274 		__u32		data_size_in;
275 		__u32		data_size_out;
276 		__aligned_u64	data_in;
277 		__aligned_u64	data_out;
278 		__u32		repeat;
279 		__u32		duration;
280 	} test;
281 
282 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
283 		union {
284 			__u32		start_id;
285 			__u32		prog_id;
286 			__u32		map_id;
287 		};
288 		__u32		next_id;
289 	};
290 
291 	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
292 		__u32		bpf_fd;
293 		__u32		info_len;
294 		__aligned_u64	info;
295 	} info;
296 
297 	struct { /* anonymous struct used by BPF_PROG_QUERY command */
298 		__u32		target_fd;	/* container object to query */
299 		__u32		attach_type;
300 		__u32		query_flags;
301 		__u32		attach_flags;
302 		__aligned_u64	prog_ids;
303 		__u32		prog_cnt;
304 	} query;
305 } __attribute__((aligned(8)));
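
/* Example (illustrative only, not part of the UAPI): creating a small hash
 * map with the BPF_MAP_CREATE command. On success the syscall returns a new
 * map file descriptor:
 *
 *	union bpf_attr attr = {};
 *	int map_fd;
 *
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */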
306 
307 /* BPF helper function descriptions:
308  *
309  * void *bpf_map_lookup_elem(&map, &key)
310  *     Return: Map value or NULL
311  *
312  * int bpf_map_update_elem(&map, &key, &value, flags)
313  *     Return: 0 on success or negative error
314  *
315  * int bpf_map_delete_elem(&map, &key)
316  *     Return: 0 on success or negative error
317  *
318  * int bpf_probe_read(void *dst, int size, void *src)
319  *     Return: 0 on success or negative error
320  *
321  * u64 bpf_ktime_get_ns(void)
322  *     Return: current ktime
323  *
324  * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
325  *     Return: length of buffer written or negative error
326  *
327  * u32 bpf_prandom_u32(void)
328  *     Return: random value
329  *
330  * u32 bpf_raw_smp_processor_id(void)
331  *     Return: SMP processor ID
332  *
333  * int bpf_skb_store_bytes(skb, offset, from, len, flags)
334  *     store bytes into packet
335  *     @skb: pointer to skb
336  *     @offset: offset within packet from skb->mac_header
337  *     @from: pointer where to copy bytes from
338  *     @len: number of bytes to store into packet
339  *     @flags: bit 0 - if true, recompute skb->csum
340  *             other bits - reserved
341  *     Return: 0 on success or negative error
342  *
343  * int bpf_l3_csum_replace(skb, offset, from, to, flags)
344  *     recompute IP checksum
345  *     @skb: pointer to skb
346  *     @offset: offset within packet where IP checksum is located
347  *     @from: old value of header field
348  *     @to: new value of header field
349  *     @flags: bits 0-3 - size of header field
350  *             other bits - reserved
351  *     Return: 0 on success or negative error
352  *
353  * int bpf_l4_csum_replace(skb, offset, from, to, flags)
354  *     recompute TCP/UDP checksum
355  *     @skb: pointer to skb
356  *     @offset: offset within packet where TCP/UDP checksum is located
357  *     @from: old value of header field
358  *     @to: new value of header field
359  *     @flags: bits 0-3 - size of header field
360  *             bit 4 - is pseudo header
361  *             other bits - reserved
362  *     Return: 0 on success or negative error
363  *
364  * int bpf_tail_call(ctx, prog_array_map, index)
365  *     jump into another BPF program
366  *     @ctx: context pointer passed to next program
367  *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
368  *     @index: 32-bit index inside array that selects specific program to run
369  *     Return: 0 on success or negative error
370  *
371  * int bpf_clone_redirect(skb, ifindex, flags)
372  *     redirect to another netdev
373  *     @skb: pointer to skb
374  *     @ifindex: ifindex of the net device
375  *     @flags: bit 0 - if set, redirect to ingress instead of egress
376  *             other bits - reserved
377  *     Return: 0 on success or negative error
378  *
379  * u64 bpf_get_current_pid_tgid(void)
380  *     Return: current->tgid << 32 | current->pid
381  *
382  * u64 bpf_get_current_uid_gid(void)
383  *     Return: current_gid << 32 | current_uid
384  *
385  * int bpf_get_current_comm(char *buf, int size_of_buf)
386  *     stores current->comm into buf
387  *     Return: 0 on success or negative error
388  *
389  * u32 bpf_get_cgroup_classid(skb)
390  *     retrieve a proc's classid
391  *     @skb: pointer to skb
392  *     Return: classid if != 0
393  *
394  * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
395  *     Return: 0 on success or negative error
396  *
397  * int bpf_skb_vlan_pop(skb)
398  *     Return: 0 on success or negative error
399  *
400  * int bpf_skb_get_tunnel_key(skb, key, size, flags)
401  * int bpf_skb_set_tunnel_key(skb, key, size, flags)
402  *     retrieve or populate tunnel metadata
403  *     @skb: pointer to skb
404  *     @key: pointer to 'struct bpf_tunnel_key'
405  *     @size: size of 'struct bpf_tunnel_key'
406  *     @flags: room for future extensions
407  *     Return: 0 on success or negative error
408  *
409  * u64 bpf_perf_event_read(map, flags)
410  *     read perf event counter value
411  *     @map: pointer to perf_event_array map
412  *     @flags: index of event in the map or bitmask flags
413  *     Return: value of perf event counter read or error code
414  *
415  * int bpf_redirect(ifindex, flags)
416  *     redirect to another netdev
417  *     @ifindex: ifindex of the net device
418  *     @flags:
419  *	  cls_bpf:
420  *          bit 0 - if set, redirect to ingress instead of egress
421  *          other bits - reserved
422  *	  xdp_bpf:
423  *	    all bits - reserved
424  *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
425  *	       xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
426  * int bpf_redirect_map(map, key, flags)
427  *     redirect to endpoint in map
428  *     @map: pointer to dev map
429  *     @key: index in map to lookup
430  *     @flags: --
431  *     Return: XDP_REDIRECT on success or XDP_ABORTED on error
432  *
433  * u32 bpf_get_route_realm(skb)
434  *     retrieve a dst's tclassid
435  *     @skb: pointer to skb
436  *     Return: realm if != 0
437  *
438  * int bpf_perf_event_output(ctx, map, flags, data, size)
439  *     output perf raw sample
440  *     @ctx: struct pt_regs*
441  *     @map: pointer to perf_event_array map
442  *     @flags: index of event in the map or bitmask flags
443  *     @data: data on stack to be output as raw data
444  *     @size: size of data
445  *     Return: 0 on success or negative error
446  *
447  * int bpf_get_stackid(ctx, map, flags)
448  *     walk user or kernel stack and return id
449  *     @ctx: struct pt_regs*
450  *     @map: pointer to stack_trace map
451  *     @flags: bits 0-7 - number of stack frames to skip
452  *             bit 8 - collect user stack instead of kernel
453  *             bit 9 - compare stacks by hash only
454  *             bit 10 - if two different stacks hash into the same stackid
455  *                      discard old
456  *             other bits - reserved
457  *     Return: >= 0 stackid on success or negative error
458  *
459  * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
460  *     calculate csum diff
461  *     @from: raw from buffer
462  *     @from_size: length of from buffer
463  *     @to: raw to buffer
464  *     @to_size: length of to buffer
465  *     @seed: optional seed
466  *     Return: csum result or negative error code
467  *
468  * int bpf_skb_get_tunnel_opt(skb, opt, size)
469  *     retrieve tunnel options metadata
470  *     @skb: pointer to skb
471  *     @opt: pointer to raw tunnel option data
472  *     @size: size of @opt
473  *     Return: option size
474  *
475  * int bpf_skb_set_tunnel_opt(skb, opt, size)
476  *     populate tunnel options metadata
477  *     @skb: pointer to skb
478  *     @opt: pointer to raw tunnel option data
479  *     @size: size of @opt
480  *     Return: 0 on success or negative error
481  *
482  * int bpf_skb_change_proto(skb, proto, flags)
483  *     Change protocol of the skb. Currently supported are v4 -> v6 and
484  *     v6 -> v4 transitions. The helper will also resize the skb. eBPF
485  *     program is expected to fill the new headers via skb_store_bytes
486  *     and lX_csum_replace.
487  *     @skb: pointer to skb
488  *     @proto: new skb->protocol type
489  *     @flags: reserved
490  *     Return: 0 on success or negative error
491  *
492  * int bpf_skb_change_type(skb, type)
493  *     Change packet type of skb.
494  *     @skb: pointer to skb
495  *     @type: new skb->pkt_type type
496  *     Return: 0 on success or negative error
497  *
498  * int bpf_skb_under_cgroup(skb, map, index)
499  *     Check cgroup2 membership of skb
500  *     @skb: pointer to skb
501  *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
502  *     @index: index of the cgroup in the bpf_map
503  *     Return:
504  *       == 0 skb failed the cgroup2 descendant test
505  *       == 1 skb succeeded the cgroup2 descendant test
506  *        < 0 error
507  *
508  * u32 bpf_get_hash_recalc(skb)
509  *     Retrieve and possibly recalculate skb->hash.
510  *     @skb: pointer to skb
511  *     Return: hash
512  *
513  * u64 bpf_get_current_task(void)
514  *     Returns current task_struct
515  *     Return: current
516  *
517  * int bpf_probe_write_user(void *dst, void *src, int len)
518  *     safely attempt to write to a location
519  *     @dst: destination address in userspace
520  *     @src: source address on stack
521  *     @len: number of bytes to copy
522  *     Return: 0 on success or negative error
523  *
524  * int bpf_current_task_under_cgroup(map, index)
525  *     Check cgroup2 membership of current task
526  *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
527  *     @index: index of the cgroup in the bpf_map
528  *     Return:
529  *       == 0 current failed the cgroup2 descendant test
530  *       == 1 current succeeded the cgroup2 descendant test
531  *        < 0 error
532  *
533  * int bpf_skb_change_tail(skb, len, flags)
534  *     The helper will resize the skb to the given new size, to be used e.g.
535  *     with control messages.
536  *     @skb: pointer to skb
537  *     @len: new skb length
538  *     @flags: reserved
539  *     Return: 0 on success or negative error
540  *
541  * int bpf_skb_pull_data(skb, len)
542  *     The helper will pull in non-linear data in case the skb is non-linear
543  *     and not all of len is part of the linear section. Only needed for
544  *     read/write with direct packet access.
545  *     @skb: pointer to skb
546  *     @len: length to make readable/writable
547  *     Return: 0 on success or negative error
548  *
549  * s64 bpf_csum_update(skb, csum)
550  *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
551  *     @skb: pointer to skb
552  *     @csum: csum to add
553  *     Return: csum on success or negative error
554  *
555  * void bpf_set_hash_invalid(skb)
556  *     Invalidate current skb->hash.
557  *     @skb: pointer to skb
558  *
559  * int bpf_get_numa_node_id()
560  *     Return: Id of current NUMA node.
561  *
562  * int bpf_skb_change_head()
563  *     Grows headroom of skb and adjusts MAC header offset accordingly.
564  *     Will extend/reallocate as required automatically.
565  *     May change skb data pointer and will thus invalidate any check
566  *     performed for direct packet access.
567  *     @skb: pointer to skb
568  *     @len: length of header to be pushed in front
569  *     @flags: Flags (unused for now)
570  *     Return: 0 on success or negative error
571  *
572  * int bpf_xdp_adjust_head(xdp_md, delta)
573  *     Adjust the xdp_md.data by delta
574  *     @xdp_md: pointer to xdp_md
575  *     @delta: A positive or negative integer to be added to xdp_md.data
576  *     Return: 0 on success or negative on error
577  *
578  * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
579  *     Copy a NUL terminated string from unsafe address. In case the string
580  *     length is smaller than size, the target is not padded with further NUL
581  *     bytes. In case the string length is larger than size, just size - 1
582  *     bytes are copied and the last byte is set to NUL.
583  *     @dst: destination address
584  *     @size: maximum number of bytes to copy, including the trailing NUL
585  *     @unsafe_ptr: unsafe address
586  *     Return:
587  *       > 0 length of the string including the trailing NUL on success
588  *       < 0 error
589  *
590  * u64 bpf_get_socket_cookie(skb)
591  *     Get the cookie for the socket stored inside sk_buff.
592  *     @skb: pointer to skb
593  *     Return: 8-byte non-decreasing number on success or 0 if the socket
594  *     field is missing inside sk_buff
595  *
596  * u32 bpf_get_socket_uid(skb)
597  *     Get the owner uid of the socket stored inside sk_buff.
598  *     @skb: pointer to skb
599  *     Return: uid of the socket owner on success or overflowuid if failed.
600  *
601  * u32 bpf_set_hash(skb, hash)
602  *     Set full skb->hash.
603  *     @skb: pointer to skb
604  *     @hash: hash to set
605  *
606  * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
607  *     Calls setsockopt. Not all opts are available, only those with
608  *     integer optvals plus TCP_CONGESTION.
609  *     Supported levels: SOL_SOCKET and IPPROTO_TCP
610  *     @bpf_socket: pointer to bpf_socket
611  *     @level: SOL_SOCKET or IPPROTO_TCP
612  *     @optname: option name
613  *     @optval: pointer to option value
614  *     @optlen: length of optval in bytes
615  *     Return: 0 or negative error
616  *
617  * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
618  *     Grow or shrink room in sk_buff.
619  *     @skb: pointer to skb
620  *     @len_diff: (signed) amount of room to grow/shrink
621  *     @mode: operation mode (enum bpf_adj_room_mode)
622  *     @flags: reserved for future use
623  *     Return: 0 on success or negative error code
624  *
625  * int bpf_sk_redirect_map(map, key, flags)
626  *     Redirect skb to a sock in map using key as a lookup key for the
627  *     sock in map.
628  *     @map: pointer to sockmap
629  *     @key: key to lookup sock in map
630  *     @flags: reserved for future use
631  *     Return: SK_REDIRECT
632  *
633  * int bpf_sock_map_update(skops, map, key, flags)
634  *	@skops: pointer to bpf_sock_ops
635  *	@map: pointer to sockmap to update
636  *	@key: key to insert/update sock in map
637  *	@flags: same flags as map update elem
638  *
639  * int bpf_xdp_adjust_meta(xdp_md, delta)
640  *     Adjust the xdp_md.data_meta by delta
641  *     @xdp_md: pointer to xdp_md
642  *     @delta: A positive or negative integer to be added to xdp_md.data_meta
643  *     Return: 0 on success or negative on error
644  */
645 #define __BPF_FUNC_MAPPER(FN)		\
646 	FN(unspec),			\
647 	FN(map_lookup_elem),		\
648 	FN(map_update_elem),		\
649 	FN(map_delete_elem),		\
650 	FN(probe_read),			\
651 	FN(ktime_get_ns),		\
652 	FN(trace_printk),		\
653 	FN(get_prandom_u32),		\
654 	FN(get_smp_processor_id),	\
655 	FN(skb_store_bytes),		\
656 	FN(l3_csum_replace),		\
657 	FN(l4_csum_replace),		\
658 	FN(tail_call),			\
659 	FN(clone_redirect),		\
660 	FN(get_current_pid_tgid),	\
661 	FN(get_current_uid_gid),	\
662 	FN(get_current_comm),		\
663 	FN(get_cgroup_classid),		\
664 	FN(skb_vlan_push),		\
665 	FN(skb_vlan_pop),		\
666 	FN(skb_get_tunnel_key),		\
667 	FN(skb_set_tunnel_key),		\
668 	FN(perf_event_read),		\
669 	FN(redirect),			\
670 	FN(get_route_realm),		\
671 	FN(perf_event_output),		\
672 	FN(skb_load_bytes),		\
673 	FN(get_stackid),		\
674 	FN(csum_diff),			\
675 	FN(skb_get_tunnel_opt),		\
676 	FN(skb_set_tunnel_opt),		\
677 	FN(skb_change_proto),		\
678 	FN(skb_change_type),		\
679 	FN(skb_under_cgroup),		\
680 	FN(get_hash_recalc),		\
681 	FN(get_current_task),		\
682 	FN(probe_write_user),		\
683 	FN(current_task_under_cgroup),	\
684 	FN(skb_change_tail),		\
685 	FN(skb_pull_data),		\
686 	FN(csum_update),		\
687 	FN(set_hash_invalid),		\
688 	FN(get_numa_node_id),		\
689 	FN(skb_change_head),		\
690 	FN(xdp_adjust_head),		\
691 	FN(probe_read_str),		\
692 	FN(get_socket_cookie),		\
693 	FN(get_socket_uid),		\
694 	FN(set_hash),			\
695 	FN(setsockopt),			\
696 	FN(skb_adjust_room),		\
697 	FN(redirect_map),		\
698 	FN(sk_redirect_map),		\
699 	FN(sock_map_update),		\
700 	FN(xdp_adjust_meta),
701 
702 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
703  * function eBPF program intends to call
704  */
705 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
706 enum bpf_func_id {
707 	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
708 	__BPF_FUNC_MAX_ID,
709 };
710 #undef __BPF_ENUM_FN
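
/* Example (illustrative only, not part of the UAPI): a BPF_CALL instruction
 * whose imm field selects a helper by bpf_func_id. This encodes
 * "call bpf_ktime_get_ns; exit":
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_JMP | BPF_CALL, .imm = BPF_FUNC_ktime_get_ns },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 */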
711 
712 /* All flags used by eBPF helper functions, placed here. */
713 
714 /* BPF_FUNC_skb_store_bytes flags. */
715 #define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
716 #define BPF_F_INVALIDATE_HASH		(1ULL << 1)
717 
718 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
719  * First 4 bits are for passing the header field size.
720  */
721 #define BPF_F_HDR_FIELD_MASK		0xfULL
722 
723 /* BPF_FUNC_l4_csum_replace flags. */
724 #define BPF_F_PSEUDO_HDR		(1ULL << 4)
725 #define BPF_F_MARK_MANGLED_0		(1ULL << 5)
726 #define BPF_F_MARK_ENFORCE		(1ULL << 6)
727 
728 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
729 #define BPF_F_INGRESS			(1ULL << 0)
730 
731 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
732 #define BPF_F_TUNINFO_IPV6		(1ULL << 0)
733 
734 /* BPF_FUNC_get_stackid flags. */
735 #define BPF_F_SKIP_FIELD_MASK		0xffULL
736 #define BPF_F_USER_STACK		(1ULL << 8)
737 #define BPF_F_FAST_STACK_CMP		(1ULL << 9)
738 #define BPF_F_REUSE_STACKID		(1ULL << 10)
739 
740 /* BPF_FUNC_skb_set_tunnel_key flags. */
741 #define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
742 #define BPF_F_DONT_FRAGMENT		(1ULL << 2)
743 
744 /* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
745 #define BPF_F_INDEX_MASK		0xffffffffULL
746 #define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
747 /* BPF_FUNC_perf_event_output for sk_buff input context. */
748 #define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
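
/* Example (illustrative only, not part of the UAPI): emitting a raw sample
 * on the current CPU's perf buffer from a tracing program. events_map and
 * the bpf_perf_event_output() wrapper stand for whatever map definition and
 * helper declarations the program is built with:
 *
 *	struct event { __u32 pid; } e = { .pid = 1234 };
 *
 *	bpf_perf_event_output(ctx, &events_map, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */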
749 
750 /* Mode for BPF_FUNC_skb_adjust_room helper. */
751 enum bpf_adj_room_mode {
752 	BPF_ADJ_ROOM_NET,
753 };
754 
755 /* user accessible mirror of in-kernel sk_buff.
756  * new fields can only be added to the end of this structure
757  */
758 struct __sk_buff {
759 	__u32 len;
760 	__u32 pkt_type;
761 	__u32 mark;
762 	__u32 queue_mapping;
763 	__u32 protocol;
764 	__u32 vlan_present;
765 	__u32 vlan_tci;
766 	__u32 vlan_proto;
767 	__u32 priority;
768 	__u32 ingress_ifindex;
769 	__u32 ifindex;
770 	__u32 tc_index;
771 	__u32 cb[5];
772 	__u32 hash;
773 	__u32 tc_classid;
774 	__u32 data;
775 	__u32 data_end;
776 	__u32 napi_id;
777 
778 	/* Accessed by BPF_PROG_TYPE_SK_SKB programs from here to ... */
779 	__u32 family;
780 	__u32 remote_ip4;	/* Stored in network byte order */
781 	__u32 local_ip4;	/* Stored in network byte order */
782 	__u32 remote_ip6[4];	/* Stored in network byte order */
783 	__u32 local_ip6[4];	/* Stored in network byte order */
784 	__u32 remote_port;	/* Stored in network byte order */
785 	__u32 local_port;	/* stored in host byte order */
786 	/* ... here. */
787 
788 	__u32 data_meta;
789 };
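
/* Example (illustrative only, not part of the UAPI): direct packet access
 * from a SCHED_CLS program. The 32-bit data/data_end fields are converted
 * to pointers, and every access has to be bounds-checked against data_end
 * or the verifier rejects the program. struct ethhdr from linux/if_ether.h
 * is assumed to be available:
 *
 *	int classify(struct __sk_buff *skb)
 *	{
 *		void *data     = (void *)(long)skb->data;
 *		void *data_end = (void *)(long)skb->data_end;
 *		struct ethhdr *eth = data;
 *
 *		if ((void *)(eth + 1) > data_end)
 *			return 0;
 *		return eth->h_proto != 0;
 *	}
 */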
790 
791 struct bpf_tunnel_key {
792 	__u32 tunnel_id;
793 	union {
794 		__u32 remote_ipv4;
795 		__u32 remote_ipv6[4];
796 	};
797 	__u8 tunnel_tos;
798 	__u8 tunnel_ttl;
799 	__u16 tunnel_ext;
800 	__u32 tunnel_label;
801 };
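
/* Example (illustrative only, not part of the UAPI): reading tunnel metadata
 * in a program attached to a tunnel device. bpf_skb_get_tunnel_key() stands
 * for the helper declaration the program is built with; on success key holds
 * tunnel_id and the remote address:
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
 *		return BPF_DROP;
 */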
802 
803 /* Generic BPF return codes which all BPF program types may support.
804  * The values are binary compatible with their TC_ACT_* counter-part to
805  * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
806  * programs.
807  *
808  * XDP is handled separately, see XDP_*.
809  */
810 enum bpf_ret_code {
811 	BPF_OK = 0,
812 	/* 1 reserved */
813 	BPF_DROP = 2,
814 	/* 3-6 reserved */
815 	BPF_REDIRECT = 7,
816 	/* >127 are reserved for prog type specific return codes */
817 };
818 
819 struct bpf_sock {
820 	__u32 bound_dev_if;
821 	__u32 family;
822 	__u32 type;
823 	__u32 protocol;
824 	__u32 mark;
825 	__u32 priority;
826 };
827 
828 #define XDP_PACKET_HEADROOM 256
829 
830 /* User return codes for XDP prog type.
831  * A valid XDP program must return one of these defined values. All other
832  * return codes are reserved for future use. Unknown return codes will
833  * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
834  */
835 enum xdp_action {
836 	XDP_ABORTED = 0,
837 	XDP_DROP,
838 	XDP_PASS,
839 	XDP_TX,
840 	XDP_REDIRECT,
841 };
842 
843 /* user accessible metadata for XDP packet hook
844  * new fields must be added to the end of this structure
845  */
846 struct xdp_md {
847 	__u32 data;
848 	__u32 data_end;
849 	__u32 data_meta;
850 };
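
/* Example (illustrative only, not part of the UAPI): a minimal XDP program
 * that drops packets shorter than an Ethernet header and passes everything
 * else up the stack. struct ethhdr from linux/if_ether.h is assumed to be
 * available:
 *
 *	int xdp_prog(struct xdp_md *ctx)
 *	{
 *		void *data     = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */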
851 
852 enum sk_action {
853 	SK_ABORTED = 0,
854 	SK_DROP,
855 	SK_REDIRECT,
856 };
857 
858 #define BPF_TAG_SIZE	8
859 
860 struct bpf_prog_info {
861 	__u32 type;
862 	__u32 id;
863 	__u8  tag[BPF_TAG_SIZE];
864 	__u32 jited_prog_len;
865 	__u32 xlated_prog_len;
866 	__aligned_u64 jited_prog_insns;
867 	__aligned_u64 xlated_prog_insns;
868 	__u64 load_time;	/* ns since boottime */
869 	__u32 created_by_uid;
870 	__u32 nr_map_ids;
871 	__aligned_u64 map_ids;
872 	__u8  name[BPF_OBJ_NAME_LEN];
873 } __attribute__((aligned(8)));
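
/* Example (illustrative only, not part of the UAPI): querying a loaded
 * program with BPF_OBJ_GET_INFO_BY_FD. The kernel fills in at most info_len
 * bytes and writes back the length it actually used. prog_fd is assumed to
 * be a valid program fd:
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd   = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */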
874 
875 struct bpf_map_info {
876 	__u32 type;
877 	__u32 id;
878 	__u32 key_size;
879 	__u32 value_size;
880 	__u32 max_entries;
881 	__u32 map_flags;
882 	__u8  name[BPF_OBJ_NAME_LEN];
883 } __attribute__((aligned(8)));
884 
885 /* User bpf_sock_ops struct to access socket values and specify request ops
886  * and their replies.
887  * Some of these fields are in network (big-endian) byte order and may need
888  * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
889  * New fields can only be added at the end of this structure
890  */
891 struct bpf_sock_ops {
892 	__u32 op;
893 	union {
894 		__u32 reply;
895 		__u32 replylong[4];
896 	};
897 	__u32 family;
898 	__u32 remote_ip4;	/* Stored in network byte order */
899 	__u32 local_ip4;	/* Stored in network byte order */
900 	__u32 remote_ip6[4];	/* Stored in network byte order */
901 	__u32 local_ip6[4];	/* Stored in network byte order */
902 	__u32 remote_port;	/* Stored in network byte order */
903 	__u32 local_port;	/* stored in host byte order */
904 };
905 
906 /* List of known BPF sock_ops operators.
907  * New entries can only be added at the end
908  */
909 enum {
910 	BPF_SOCK_OPS_VOID,
911 	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
912 					 * -1 if default value should be used
913 					 */
914 	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
915 					 * window (in packets) or -1 if default
916 					 * value should be used
917 					 */
918 	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
919 					 * active connection is initialized
920 					 */
921 	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
922 						 * active connection is
923 						 * established
924 						 */
925 	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
926 						 * passive connection is
927 						 * established
928 						 */
929 	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
930 					 * needs ECN
931 					 */
932 };
933 
934 #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
935 #define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
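
/* Example (illustrative only, not part of the UAPI): setting the initial
 * congestion window from a sock_ops program once an active connection is
 * established. bpf_setsockopt() stands for the helper declaration the
 * program is built with, and IPPROTO_TCP is assumed to be defined:
 *
 *	int iw = 10;
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_setsockopt(skops, IPPROTO_TCP, TCP_BPF_IW,
 *			       &iw, sizeof(iw));
 */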
936 
937 #endif /* _UAPI__LINUX_BPF_H__ */
938