/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

/* Per-client netpoll configuration and state. */
struct netpoll {
	struct net_device *dev;
	char dev_name[IFNAMSIZ];
	const char *name;
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;
	u16 local_port, remote_port;
	u8 remote_mac[ETH_ALEN];

	struct list_head rx; /* rx_np list element */
};

/* Per-device netpoll state, hung off net_device->npinfo. */
struct netpoll_info {
	atomic_t refcnt;

	int rx_flags;
	spinlock_t rx_lock;
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;

	struct delayed_work tx_work;

	struct netpoll *netpoll;
};

void netpoll_poll_dev(struct net_device *dev);
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);

/* Transmit an skb through the device the netpoll is bound to. */
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netpoll_send_skb_on_dev(np, skb, np->dev);
}

#ifdef CONFIG_NETPOLL
/*
 * Give netpoll a chance to intercept an incoming skb; returns true if
 * netpoll consumed the packet.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);
	npinfo = rcu_dereference_bh(skb->dev->npinfo);

	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		goto out;

	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}

/* True if netpoll RX is active on the skb's device. */
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}

static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	if (!list_empty(&skb->dev->napi_list))
		return netpoll_rx(skb);
	return 0;
}

/*
 * Claim a NAPI context so netpoll and the normal NAPI poll loop do not
 * run concurrently; the return value is the cookie to pass to
 * netpoll_poll_unlock().
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
}

/*
 * Netpoll transmits with hard IRQs disabled, so drivers use this as a
 * hint that the current transmit is coming from netpoll context.
 */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}

#else /* !CONFIG_NETPOLL */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline int netpoll_tx_running(struct net_device *dev)
{
	return 0;
}
#endif

#endif
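
/*
 * Usage sketch (illustrative only, not part of this header): a client such
 * as netconsole typically embeds a struct netpoll, parses a
 * "port@src-ip/dev,port@dst-ip/dst-mac" style option string with
 * netpoll_parse_options(), binds it to the device with netpoll_setup(),
 * and then transmits with netpoll_send_udp(); netpoll_cleanup() undoes the
 * setup on unload.  The identifiers example_np, example_config and
 * example_init below are hypothetical, and the addresses are placeholders.
 *
 *	static struct netpoll example_np = {
 *		.name		= "example",
 *		.dev_name	= "eth0",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *	};
 *
 *	static char example_config[] =
 *		"6665@10.0.0.1/eth0,6666@10.0.0.2/ff:ff:ff:ff:ff:ff";
 *
 *	static int __init example_init(void)
 *	{
 *		int err;
 *
 *		err = netpoll_parse_options(&example_np, example_config);
 *		if (err)
 *			return err;
 *		err = netpoll_setup(&example_np);
 *		if (err)
 *			return err;
 *		netpoll_send_udp(&example_np, "hello\n", 6);
 *		return 0;
 *	}
 */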