17463acfbSLukas Wunner /* SPDX-License-Identifier: GPL-2.0 */
217d20784SLukas Wunner #ifndef _NETFILTER_NETDEV_H_
317d20784SLukas Wunner #define _NETFILTER_NETDEV_H_
47463acfbSLukas Wunner
57463acfbSLukas Wunner #include <linux/netfilter.h>
67463acfbSLukas Wunner #include <linux/netdevice.h>
77463acfbSLukas Wunner
87463acfbSLukas Wunner #ifdef CONFIG_NETFILTER_INGRESS
nf_hook_ingress_active(const struct sk_buff * skb)97463acfbSLukas Wunner static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
107463acfbSLukas Wunner {
117463acfbSLukas Wunner #ifdef CONFIG_JUMP_LABEL
127463acfbSLukas Wunner if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
137463acfbSLukas Wunner return false;
147463acfbSLukas Wunner #endif
157463acfbSLukas Wunner return rcu_access_pointer(skb->dev->nf_hooks_ingress);
167463acfbSLukas Wunner }
177463acfbSLukas Wunner
187463acfbSLukas Wunner /* caller must hold rcu_read_lock */
nf_hook_ingress(struct sk_buff * skb)197463acfbSLukas Wunner static inline int nf_hook_ingress(struct sk_buff *skb)
207463acfbSLukas Wunner {
217463acfbSLukas Wunner struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
227463acfbSLukas Wunner struct nf_hook_state state;
237463acfbSLukas Wunner int ret;
247463acfbSLukas Wunner
257463acfbSLukas Wunner /* Must recheck the ingress hook head, in the event it became NULL
267463acfbSLukas Wunner * after the check in nf_hook_ingress_active evaluated to true.
277463acfbSLukas Wunner */
287463acfbSLukas Wunner if (unlikely(!e))
297463acfbSLukas Wunner return 0;
307463acfbSLukas Wunner
317463acfbSLukas Wunner nf_hook_state_init(&state, NF_NETDEV_INGRESS,
327463acfbSLukas Wunner NFPROTO_NETDEV, skb->dev, NULL, NULL,
337463acfbSLukas Wunner dev_net(skb->dev), NULL);
347463acfbSLukas Wunner ret = nf_hook_slow(skb, &state, e, 0);
357463acfbSLukas Wunner if (ret == 0)
367463acfbSLukas Wunner return -1;
377463acfbSLukas Wunner
387463acfbSLukas Wunner return ret;
397463acfbSLukas Wunner }
407463acfbSLukas Wunner
417463acfbSLukas Wunner #else /* CONFIG_NETFILTER_INGRESS */
/* Stub for kernels built without CONFIG_NETFILTER_INGRESS: there are
 * never any ingress hooks. Signature matches the real variant above
 * (bool return, const skb) for consistency; bool converts implicitly
 * to int, so existing callers are unaffected.
 */
static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
{
	return false;
}
467463acfbSLukas Wunner
/* Stub for kernels built without CONFIG_NETFILTER_INGRESS: no hooks to
 * run, so always report "continue processing" (0) to the caller.
 */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
	return 0;
}
517463acfbSLukas Wunner #endif /* CONFIG_NETFILTER_INGRESS */
5217d20784SLukas Wunner
5342df6e1dSLukas Wunner #ifdef CONFIG_NETFILTER_EGRESS
nf_hook_egress_active(void)5442df6e1dSLukas Wunner static inline bool nf_hook_egress_active(void)
5542df6e1dSLukas Wunner {
5642df6e1dSLukas Wunner #ifdef CONFIG_JUMP_LABEL
5742df6e1dSLukas Wunner if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
5842df6e1dSLukas Wunner return false;
5942df6e1dSLukas Wunner #endif
6042df6e1dSLukas Wunner return true;
6142df6e1dSLukas Wunner }
6242df6e1dSLukas Wunner
6342df6e1dSLukas Wunner /**
6442df6e1dSLukas Wunner * nf_hook_egress - classify packets before transmission
6542df6e1dSLukas Wunner * @skb: packet to be classified
6642df6e1dSLukas Wunner * @rc: result code which shall be returned by __dev_queue_xmit() on failure
6742df6e1dSLukas Wunner * @dev: netdev whose egress hooks shall be applied to @skb
6842df6e1dSLukas Wunner *
6942df6e1dSLukas Wunner * Caller must hold rcu_read_lock.
7042df6e1dSLukas Wunner *
7142df6e1dSLukas Wunner * On ingress, packets are classified first by tc, then by netfilter.
7242df6e1dSLukas Wunner * On egress, the order is reversed for symmetry. Conceptually, tc and
7342df6e1dSLukas Wunner * netfilter can be thought of as layers, with netfilter layered above tc:
7442df6e1dSLukas Wunner * When tc redirects a packet to another interface, netfilter is not applied
7542df6e1dSLukas Wunner * because the packet is on the tc layer.
7642df6e1dSLukas Wunner *
7742df6e1dSLukas Wunner * The nf_skip_egress flag controls whether netfilter is applied on egress.
7842df6e1dSLukas Wunner * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
7942df6e1dSLukas Wunner * packet passes through tc and netfilter. Because __dev_queue_xmit() may be
8042df6e1dSLukas Wunner * called recursively by tunnel drivers such as vxlan, the flag is reverted to
8142df6e1dSLukas Wunner * false after sch_handle_egress(). This ensures that netfilter is applied
8242df6e1dSLukas Wunner * both on the overlay and underlying network.
83*3f330db3SJakub Kicinski *
84*3f330db3SJakub Kicinski * Returns: @skb on success or %NULL if the packet was consumed or filtered.
8542df6e1dSLukas Wunner */
nf_hook_egress(struct sk_buff * skb,int * rc,struct net_device * dev)8642df6e1dSLukas Wunner static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
8742df6e1dSLukas Wunner struct net_device *dev)
8842df6e1dSLukas Wunner {
8942df6e1dSLukas Wunner struct nf_hook_entries *e;
9042df6e1dSLukas Wunner struct nf_hook_state state;
9142df6e1dSLukas Wunner int ret;
9242df6e1dSLukas Wunner
9342df6e1dSLukas Wunner #ifdef CONFIG_NETFILTER_SKIP_EGRESS
9442df6e1dSLukas Wunner if (skb->nf_skip_egress)
9542df6e1dSLukas Wunner return skb;
9642df6e1dSLukas Wunner #endif
9742df6e1dSLukas Wunner
986316136eSFlorian Westphal e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held());
9942df6e1dSLukas Wunner if (!e)
10042df6e1dSLukas Wunner return skb;
10142df6e1dSLukas Wunner
10242df6e1dSLukas Wunner nf_hook_state_init(&state, NF_NETDEV_EGRESS,
103d645552eSPhil Sutter NFPROTO_NETDEV, NULL, dev, NULL,
10442df6e1dSLukas Wunner dev_net(dev), NULL);
10517a8f31bSFlorian Westphal
10617a8f31bSFlorian Westphal /* nf assumes rcu_read_lock, not just read_lock_bh */
10717a8f31bSFlorian Westphal rcu_read_lock();
10842df6e1dSLukas Wunner ret = nf_hook_slow(skb, &state, e, 0);
10917a8f31bSFlorian Westphal rcu_read_unlock();
11042df6e1dSLukas Wunner
11142df6e1dSLukas Wunner if (ret == 1) {
11242df6e1dSLukas Wunner return skb;
11342df6e1dSLukas Wunner } else if (ret < 0) {
11442df6e1dSLukas Wunner *rc = NET_XMIT_DROP;
11542df6e1dSLukas Wunner return NULL;
11642df6e1dSLukas Wunner } else { /* ret == 0 */
11742df6e1dSLukas Wunner *rc = NET_XMIT_SUCCESS;
11842df6e1dSLukas Wunner return NULL;
11942df6e1dSLukas Wunner }
12042df6e1dSLukas Wunner }
12142df6e1dSLukas Wunner #else /* CONFIG_NETFILTER_EGRESS */
/* Stub for kernels built without CONFIG_NETFILTER_EGRESS: egress hooks
 * can never be active.
 */
static inline bool nf_hook_egress_active(void)
{
	return false;
}
12642df6e1dSLukas Wunner
/* Stub for kernels built without CONFIG_NETFILTER_EGRESS: pass @skb
 * through untouched; @rc and @dev are ignored.
 */
static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
					     struct net_device *dev)
{
	return skb;
}
13242df6e1dSLukas Wunner #endif /* CONFIG_NETFILTER_EGRESS */
13342df6e1dSLukas Wunner
/* Set or clear @skb's "skip netfilter egress" mark (see the
 * nf_hook_egress() kernel-doc for how this flag is used). No-op when
 * the kernel is built without CONFIG_NETFILTER_SKIP_EGRESS.
 */
static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
{
#ifdef CONFIG_NETFILTER_SKIP_EGRESS
	skb->nf_skip_egress = skip;
#endif
}
14042df6e1dSLukas Wunner
/* Initialize @dev's netfilter hook heads to NULL (no hooks installed).
 * Called during net_device setup, before the device is visible, hence
 * RCU_INIT_POINTER rather than rcu_assign_pointer.
 */
static inline void nf_hook_netdev_init(struct net_device *dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
#endif
}
15017d20784SLukas Wunner
15117d20784SLukas Wunner #endif /* _NETFILTER_NETDEV_H_ */
152