#ifndef __LINUX_BRIDGE_NETFILTER_H
#define __LINUX_BRIDGE_NETFILTER_H

#include <uapi/linux/netfilter_bridge.h>

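/*
 * Hook priorities for the bridge netfilter hooks.  As with the other
 * netfilter families, hooks with numerically lower priority run first.
 */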
enum nf_br_hook_priorities {
	NF_BR_PRI_FIRST = INT_MIN,
	NF_BR_PRI_NAT_DST_BRIDGED = -300,
	NF_BR_PRI_FILTER_BRIDGED = -200,
	NF_BR_PRI_BRNF = 0,
	NF_BR_PRI_NAT_DST_OTHER = 100,
	NF_BR_PRI_FILTER_OTHER = 200,
	NF_BR_PRI_NAT_SRC = 300,
	NF_BR_PRI_LAST = INT_MAX,
};

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)

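/* Flag bits kept in skb->nf_bridge->mask while a frame traverses the
 * bridge netfilter hooks.
 */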
#define BRNF_PKT_TYPE			0x01
#define BRNF_BRIDGED_DNAT		0x02
#define BRNF_BRIDGED			0x04
#define BRNF_NF_BRIDGE_PREROUTING	0x08
#define BRNF_8021Q			0x10
#define BRNF_PPPoE			0x20

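/* Length of the 802.1Q or PPPoE encapsulation sitting in front of the
 * network header, or 0 if there is no such encapsulation.
 */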
static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __cpu_to_be16(ETH_P_8021Q):
		return VLAN_HLEN;
	case __cpu_to_be16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}

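/* Restore skb->protocol to the encapsulating protocol (802.1Q or PPPoE)
 * recorded in nf_bridge->mask.
 */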
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}

/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 *
 * Only used in br_forward.c
 */
static inline int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}

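/* Restore the saved link-layer header only if the skb actually went
 * through the bridge netfilter path (BRNF_BRIDGED or BRNF_BRIDGED_DNAT
 * is set).
 */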
static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
{
	if (skb->nf_bridge &&
	    skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
		return nf_bridge_copy_header(skb);
	return 0;
}

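/*
 * Rough sketch of a caller, loosely modelled on the bridge output path:
 * before handing a bridged frame to the device, the saved link-layer
 * header is restored because ip_fragment() does not copy it.  The
 * function name and error handling below are illustrative only, not
 * the exact br_forward.c code:
 *
 *	static int example_queue_push_xmit(struct sk_buff *skb)
 *	{
 *		if (nf_bridge_maybe_copy_header(skb)) {
 *			kfree_skb(skb);
 *			return 0;
 *		}
 *		skb_push(skb, ETH_HLEN);
 *		dev_queue_xmit(skb);
 *		return 0;
 *	}
 */
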
/* PPPoE adds a session header that the IP layer does not account for,
 * so the usable MTU towards such a port is smaller by PPPOE_SES_HLEN.
 */
static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
	if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
		return PPPOE_SES_HLEN;
	return 0;
}

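/* Defined in br_input.c: continues delivery of a frame received on a
 * bridge port (forwarding and/or local delivery).
 */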
int br_handle_frame_finish(struct sk_buff *skb);
/* Only used in br_device.c */
static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	skb_pull(skb, ETH_HLEN);
	nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
				       skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
	skb->dev = nf_bridge->physindev;
	return br_handle_frame_finish(skb);
}

/* This is called by the IP fragmenting code and it ensures there is
 * enough room for the encapsulating header (if there is one). */
static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
{
	if (skb->nf_bridge)
		return nf_bridge_encap_header_len(skb);
	return 0;
}

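/* Per-skb scratch data used by the bridge netfilter code; currently
 * only an IPv4 destination address is stored here (e.g. to check
 * whether DNAT changed the packet).
 */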
struct bridge_skb_cb {
	union {
		__be32 ipv4;
	} daddr;
};

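/* br_netfilter attaches a fake rtable so that the IPv4 hooks see a
 * valid dst; drop it again before the skb leaves the bridge layer.
 */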
static inline void br_drop_fake_rtable(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && (dst->flags & DST_FAKE_RTABLE))
		skb_dst_drop(skb);
}

#else
/* CONFIG_BRIDGE_NETFILTER disabled: the helpers collapse to no-ops. */
#define nf_bridge_maybe_copy_header(skb)	(0)
#define nf_bridge_pad(skb)			(0)
#define br_drop_fake_rtable(skb)		do { } while (0)
#endif /* CONFIG_BRIDGE_NETFILTER */

#endif