1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Definitions for the Interfaces handler. 7 * 8 * Version: @(#)dev.h 1.0.10 08/12/93 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <[email protected]> 12 * Corey Minyard <[email protected]> 13 * Donald J. Becker, <[email protected]> 14 * Alan Cox, <[email protected]> 15 * Bjorn Ekwall. <[email protected]> 16 * Pekka Riikonen <[email protected]> 17 * 18 * This program is free software; you can redistribute it and/or 19 * modify it under the terms of the GNU General Public License 20 * as published by the Free Software Foundation; either version 21 * 2 of the License, or (at your option) any later version. 22 * 23 * Moved to /usr/include/linux for NET3 24 */ 25 #ifndef _LINUX_NETDEVICE_H 26 #define _LINUX_NETDEVICE_H 27 28 #include <linux/if.h> 29 #include <linux/if_ether.h> 30 #include <linux/if_packet.h> 31 32 #ifdef __KERNEL__ 33 #include <linux/timer.h> 34 #include <asm/atomic.h> 35 #include <asm/cache.h> 36 #include <asm/byteorder.h> 37 38 #include <linux/device.h> 39 #include <linux/percpu.h> 40 #include <linux/dmaengine.h> 41 42 struct vlan_group; 43 struct ethtool_ops; 44 struct netpoll_info; 45 /* 802.11 specific */ 46 struct wireless_dev; 47 /* source back-compat hooks */ 48 #define SET_ETHTOOL_OPS(netdev,ops) \ 49 ( (netdev)->ethtool_ops = (ops) ) 50 51 #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev 52 functions are available. */ 53 #define HAVE_FREE_NETDEV /* free_netdev() */ 54 #define HAVE_NETDEV_PRIV /* netdev_priv() */ 55 56 #define NET_XMIT_SUCCESS 0 57 #define NET_XMIT_DROP 1 /* skb dropped */ 58 #define NET_XMIT_CN 2 /* congestion notification */ 59 #define NET_XMIT_POLICED 3 /* skb is shot by police */ 60 #define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; 61 (TC use only - dev_queue_xmit 62 returns this as NET_XMIT_SUCCESS) */ 63 64 /* Backlog congestion levels */ 65 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 66 #define NET_RX_DROP 1 /* packet dropped */ 67 #define NET_RX_CN_LOW 2 /* storm alert, just in case */ 68 #define NET_RX_CN_MOD 3 /* Storm on its way! */ 69 #define NET_RX_CN_HIGH 4 /* The storm is here */ 70 #define NET_RX_BAD 5 /* packet dropped due to kernel error */ 71 72 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It 73 * indicates that the device will soon be dropping packets, or already drops 74 * some packets of the same priority; prompting us to send less aggressively. */ 75 #define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e)) 76 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) 77 78 #endif 79 80 #define MAX_ADDR_LEN 32 /* Largest hardware address length */ 81 82 /* Driver transmit return codes */ 83 #define NETDEV_TX_OK 0 /* driver took care of packet */ 84 #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ 85 #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ 86 87 /* 88 * Compute the worst case header length according to the protocols 89 * used. 90 */ 91 92 #if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR) 93 #define LL_MAX_HEADER 32 94 #else 95 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 96 #define LL_MAX_HEADER 96 97 #else 98 #define LL_MAX_HEADER 48 99 #endif 100 #endif 101 102 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ 103 !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ 104 !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ 105 !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) 106 #define MAX_HEADER LL_MAX_HEADER 107 #else 108 #define MAX_HEADER (LL_MAX_HEADER + 48) 109 #endif 110 111 /* 112 * Network device statistics. Akin to the 2.0 ether stats but 113 * with byte counters. 114 */ 115 116 struct net_device_stats 117 { 118 unsigned long rx_packets; /* total packets received */ 119 unsigned long tx_packets; /* total packets transmitted */ 120 unsigned long rx_bytes; /* total bytes received */ 121 unsigned long tx_bytes; /* total bytes transmitted */ 122 unsigned long rx_errors; /* bad packets received */ 123 unsigned long tx_errors; /* packet transmit problems */ 124 unsigned long rx_dropped; /* no space in linux buffers */ 125 unsigned long tx_dropped; /* no space available in linux */ 126 unsigned long multicast; /* multicast packets received */ 127 unsigned long collisions; 128 129 /* detailed rx_errors: */ 130 unsigned long rx_length_errors; 131 unsigned long rx_over_errors; /* receiver ring buff overflow */ 132 unsigned long rx_crc_errors; /* recved pkt with crc error */ 133 unsigned long rx_frame_errors; /* recv'd frame alignment error */ 134 unsigned long rx_fifo_errors; /* recv'r fifo overrun */ 135 unsigned long rx_missed_errors; /* receiver missed packet */ 136 137 /* detailed tx_errors */ 138 unsigned long tx_aborted_errors; 139 unsigned long tx_carrier_errors; 140 unsigned long tx_fifo_errors; 141 unsigned long tx_heartbeat_errors; 142 unsigned long tx_window_errors; 143 144 /* for cslip etc */ 145 unsigned long rx_compressed; 146 unsigned long tx_compressed; 147 }; 148 149 150 /* Media selection options. */ 151 enum { 152 IF_PORT_UNKNOWN = 0, 153 IF_PORT_10BASE2, 154 IF_PORT_10BASET, 155 IF_PORT_AUI, 156 IF_PORT_100BASET, 157 IF_PORT_100BASETX, 158 IF_PORT_100BASEFX 159 }; 160 161 #ifdef __KERNEL__ 162 163 #include <linux/cache.h> 164 #include <linux/skbuff.h> 165 166 struct neighbour; 167 struct neigh_parms; 168 struct sk_buff; 169 170 struct netif_rx_stats 171 { 172 unsigned total; 173 unsigned dropped; 174 unsigned time_squeeze; 175 unsigned cpu_collision; 176 }; 177 178 DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); 179 180 181 /* 182 * We tag multicasts with these structures. 183 */ 184 185 struct dev_mc_list 186 { 187 struct dev_mc_list *next; 188 __u8 dmi_addr[MAX_ADDR_LEN]; 189 unsigned char dmi_addrlen; 190 int dmi_users; 191 int dmi_gusers; 192 }; 193 194 struct hh_cache 195 { 196 struct hh_cache *hh_next; /* Next entry */ 197 atomic_t hh_refcnt; /* number of users */ 198 /* 199 * We want hh_output, hh_len, hh_lock and hh_data be a in a separate 200 * cache line on SMP. 201 * They are mostly read, but hh_refcnt may be changed quite frequently, 202 * incurring cache line ping pongs. 203 */ 204 __be16 hh_type ____cacheline_aligned_in_smp; 205 /* protocol identifier, f.e ETH_P_IP 206 * NOTE: For VLANs, this will be the 207 * encapuslated type. --BLG 208 */ 209 u16 hh_len; /* length of header */ 210 int (*hh_output)(struct sk_buff *skb); 211 seqlock_t hh_lock; 212 213 /* cached hardware header; allow for machine alignment needs. */ 214 #define HH_DATA_MOD 16 215 #define HH_DATA_OFF(__len) \ 216 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) 217 #define HH_DATA_ALIGN(__len) \ 218 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) 219 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 220 }; 221 222 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. 223 * Alternative is: 224 * dev->hard_header_len ? (dev->hard_header_len + 225 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 226 * 227 * We could use other alignment values, but we must maintain the 228 * relationship HH alignment <= LL alignment. 229 */ 230 #define LL_RESERVED_SPACE(dev) \ 231 (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 232 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 233 ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 234 235 /* These flag bits are private to the generic network queueing 236 * layer, they may not be explicitly referenced by any other 237 * code. 238 */ 239 240 enum netdev_state_t 241 { 242 __LINK_STATE_XOFF=0, 243 __LINK_STATE_START, 244 __LINK_STATE_PRESENT, 245 __LINK_STATE_SCHED, 246 __LINK_STATE_NOCARRIER, 247 __LINK_STATE_RX_SCHED, 248 __LINK_STATE_LINKWATCH_PENDING, 249 __LINK_STATE_DORMANT, 250 __LINK_STATE_QDISC_RUNNING, 251 }; 252 253 254 /* 255 * This structure holds at boot time configured netdevice settings. They 256 * are then used in the device probing. 257 */ 258 struct netdev_boot_setup { 259 char name[IFNAMSIZ]; 260 struct ifmap map; 261 }; 262 #define NETDEV_BOOT_SETUP_MAX 8 263 264 extern int __init netdev_boot_setup(char *str); 265 266 /* 267 * The DEVICE structure. 268 * Actually, this whole structure is a big mistake. It mixes I/O 269 * data with strictly "high-level" data, and it has to know about 270 * almost every data structure used in the INET module. 271 * 272 * FIXME: cleanup struct net_device such that network protocol info 273 * moves out. 274 */ 275 276 struct net_device 277 { 278 279 /* 280 * This is the first field of the "visible" part of this structure 281 * (i.e. as seen by users in the "Space.c" file). It is the name 282 * the interface. 283 */ 284 char name[IFNAMSIZ]; 285 /* device name hash chain */ 286 struct hlist_node name_hlist; 287 288 /* 289 * I/O specific fields 290 * FIXME: Merge these and struct ifmap into one 291 */ 292 unsigned long mem_end; /* shared mem end */ 293 unsigned long mem_start; /* shared mem start */ 294 unsigned long base_addr; /* device I/O address */ 295 unsigned int irq; /* device IRQ number */ 296 297 /* 298 * Some hardware also needs these fields, but they are not 299 * part of the usual set specified in Space.c. 300 */ 301 302 unsigned char if_port; /* Selectable AUI, TP,..*/ 303 unsigned char dma; /* DMA channel */ 304 305 unsigned long state; 306 307 struct list_head dev_list; 308 309 /* The device initialization function. Called only once. */ 310 int (*init)(struct net_device *dev); 311 312 /* ------- Fields preinitialized in Space.c finish here ------- */ 313 314 /* Net device features */ 315 unsigned long features; 316 #define NETIF_F_SG 1 /* Scatter/gather IO. */ 317 #define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ 318 #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ 319 #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ 320 #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ 321 #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ 322 #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ 323 #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ 324 #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ 325 #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ 326 #define NETIF_F_GSO 2048 /* Enable software GSO. */ 327 #define NETIF_F_LLTX 4096 /* LockLess TX */ 328 329 /* Segmentation offload features */ 330 #define NETIF_F_GSO_SHIFT 16 331 #define NETIF_F_GSO_MASK 0xffff0000 332 #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) 333 #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) 334 #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) 335 #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) 336 #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) 337 338 /* List of features with software fallbacks. */ 339 #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) 340 341 #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 342 #define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) 343 344 struct net_device *next_sched; 345 346 /* Interface index. Unique device identifier */ 347 int ifindex; 348 int iflink; 349 350 351 struct net_device_stats* (*get_stats)(struct net_device *dev); 352 struct net_device_stats stats; 353 354 #ifdef CONFIG_WIRELESS_EXT 355 /* List of functions to handle Wireless Extensions (instead of ioctl). 356 * See <net/iw_handler.h> for details. Jean II */ 357 const struct iw_handler_def * wireless_handlers; 358 /* Instance data managed by the core of Wireless Extensions. */ 359 struct iw_public_data * wireless_data; 360 #endif 361 const struct ethtool_ops *ethtool_ops; 362 363 /* 364 * This marks the end of the "visible" part of the structure. All 365 * fields hereafter are internal to the system, and may change at 366 * will (read: may be cleaned up at will). 367 */ 368 369 370 unsigned int flags; /* interface flags (a la BSD) */ 371 unsigned short gflags; 372 unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ 373 unsigned short padded; /* How much padding added by alloc_netdev() */ 374 375 unsigned char operstate; /* RFC2863 operstate */ 376 unsigned char link_mode; /* mapping policy to operstate */ 377 378 unsigned mtu; /* interface MTU value */ 379 unsigned short type; /* interface hardware type */ 380 unsigned short hard_header_len; /* hardware hdr length */ 381 382 struct net_device *master; /* Pointer to master device of a group, 383 * which this device is member of. 384 */ 385 386 /* Interface address info. */ 387 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 388 unsigned char addr_len; /* hardware address length */ 389 unsigned short dev_id; /* for shared network cards */ 390 391 struct dev_mc_list *mc_list; /* Multicast mac addresses */ 392 int mc_count; /* Number of installed mcasts */ 393 int promiscuity; 394 int allmulti; 395 396 397 /* Protocol specific pointers */ 398 399 void *atalk_ptr; /* AppleTalk link */ 400 void *ip_ptr; /* IPv4 specific data */ 401 void *dn_ptr; /* DECnet specific data */ 402 void *ip6_ptr; /* IPv6 specific data */ 403 void *ec_ptr; /* Econet specific data */ 404 void *ax25_ptr; /* AX.25 specific data */ 405 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, 406 assign before registering */ 407 408 /* 409 * Cache line mostly used on receive path (including eth_type_trans()) 410 */ 411 struct list_head poll_list ____cacheline_aligned_in_smp; 412 /* Link to poll list */ 413 414 int (*poll) (struct net_device *dev, int *quota); 415 int quota; 416 int weight; 417 unsigned long last_rx; /* Time of last Rx */ 418 /* Interface address info used in eth_type_trans() */ 419 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast 420 because most packets are unicast) */ 421 422 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 423 424 /* 425 * Cache line mostly used on queue transmit path (qdisc) 426 */ 427 /* device queue lock */ 428 spinlock_t queue_lock ____cacheline_aligned_in_smp; 429 struct Qdisc *qdisc; 430 struct Qdisc *qdisc_sleeping; 431 struct list_head qdisc_list; 432 unsigned long tx_queue_len; /* Max frames per queue allowed */ 433 434 /* Partially transmitted GSO packet. */ 435 struct sk_buff *gso_skb; 436 437 /* ingress path synchronizer */ 438 spinlock_t ingress_lock; 439 struct Qdisc *qdisc_ingress; 440 441 /* 442 * One part is mostly used on xmit path (device) 443 */ 444 /* hard_start_xmit synchronizer */ 445 spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 446 /* cpu id of processor entered to hard_start_xmit or -1, 447 if nobody entered there. 448 */ 449 int xmit_lock_owner; 450 void *priv; /* pointer to private data */ 451 int (*hard_start_xmit) (struct sk_buff *skb, 452 struct net_device *dev); 453 /* These may be needed for future network-power-down code. */ 454 unsigned long trans_start; /* Time (in jiffies) of last Tx */ 455 456 int watchdog_timeo; /* used by dev_watchdog() */ 457 struct timer_list watchdog_timer; 458 459 /* 460 * refcnt is a very hot point, so align it on SMP 461 */ 462 /* Number of references to this device */ 463 atomic_t refcnt ____cacheline_aligned_in_smp; 464 465 /* delayed register/unregister */ 466 struct list_head todo_list; 467 /* device index hash chain */ 468 struct hlist_node index_hlist; 469 470 /* register/unregister state machine */ 471 enum { NETREG_UNINITIALIZED=0, 472 NETREG_REGISTERED, /* completed register_netdevice */ 473 NETREG_UNREGISTERING, /* called unregister_netdevice */ 474 NETREG_UNREGISTERED, /* completed unregister todo */ 475 NETREG_RELEASED, /* called free_netdev */ 476 } reg_state; 477 478 /* Called after device is detached from network. */ 479 void (*uninit)(struct net_device *dev); 480 /* Called after last user reference disappears. */ 481 void (*destructor)(struct net_device *dev); 482 483 /* Pointers to interface service routines. */ 484 int (*open)(struct net_device *dev); 485 int (*stop)(struct net_device *dev); 486 #define HAVE_NETDEV_POLL 487 int (*hard_header) (struct sk_buff *skb, 488 struct net_device *dev, 489 unsigned short type, 490 void *daddr, 491 void *saddr, 492 unsigned len); 493 int (*rebuild_header)(struct sk_buff *skb); 494 #define HAVE_MULTICAST 495 void (*set_multicast_list)(struct net_device *dev); 496 #define HAVE_SET_MAC_ADDR 497 int (*set_mac_address)(struct net_device *dev, 498 void *addr); 499 #define HAVE_PRIVATE_IOCTL 500 int (*do_ioctl)(struct net_device *dev, 501 struct ifreq *ifr, int cmd); 502 #define HAVE_SET_CONFIG 503 int (*set_config)(struct net_device *dev, 504 struct ifmap *map); 505 #define HAVE_HEADER_CACHE 506 int (*hard_header_cache)(struct neighbour *neigh, 507 struct hh_cache *hh); 508 void (*header_cache_update)(struct hh_cache *hh, 509 struct net_device *dev, 510 unsigned char * haddr); 511 #define HAVE_CHANGE_MTU 512 int (*change_mtu)(struct net_device *dev, int new_mtu); 513 514 #define HAVE_TX_TIMEOUT 515 void (*tx_timeout) (struct net_device *dev); 516 517 void (*vlan_rx_register)(struct net_device *dev, 518 struct vlan_group *grp); 519 void (*vlan_rx_add_vid)(struct net_device *dev, 520 unsigned short vid); 521 void (*vlan_rx_kill_vid)(struct net_device *dev, 522 unsigned short vid); 523 524 int (*hard_header_parse)(struct sk_buff *skb, 525 unsigned char *haddr); 526 int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); 527 #ifdef CONFIG_NETPOLL 528 struct netpoll_info *npinfo; 529 #endif 530 #ifdef CONFIG_NET_POLL_CONTROLLER 531 void (*poll_controller)(struct net_device *dev); 532 #endif 533 534 /* bridge stuff */ 535 struct net_bridge_port *br_port; 536 537 /* class/net/name entry */ 538 struct device dev; 539 /* space for optional statistics and wireless sysfs groups */ 540 struct attribute_group *sysfs_groups[3]; 541 }; 542 #define to_net_dev(d) container_of(d, struct net_device, dev) 543 544 #define NETDEV_ALIGN 32 545 #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) 546 547 static inline void *netdev_priv(struct net_device *dev) 548 { 549 return (char *)dev + ((sizeof(struct net_device) 550 + NETDEV_ALIGN_CONST) 551 & ~NETDEV_ALIGN_CONST); 552 } 553 554 #define SET_MODULE_OWNER(dev) do { } while (0) 555 /* Set the sysfs physical device reference for the network logical device 556 * if set prior to registration will cause a symlink during initialization. 557 */ 558 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 559 560 struct packet_type { 561 __be16 type; /* This is really htons(ether_type). */ 562 struct net_device *dev; /* NULL is wildcarded here */ 563 int (*func) (struct sk_buff *, 564 struct net_device *, 565 struct packet_type *, 566 struct net_device *); 567 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 568 int features); 569 int (*gso_send_check)(struct sk_buff *skb); 570 void *af_packet_priv; 571 struct list_head list; 572 }; 573 574 #include <linux/interrupt.h> 575 #include <linux/notifier.h> 576 577 extern struct net_device loopback_dev; /* The loopback */ 578 extern struct list_head dev_base_head; /* All devices */ 579 extern rwlock_t dev_base_lock; /* Device list lock */ 580 581 #define for_each_netdev(d) \ 582 list_for_each_entry(d, &dev_base_head, dev_list) 583 #define for_each_netdev_safe(d, n) \ 584 list_for_each_entry_safe(d, n, &dev_base_head, dev_list) 585 #define for_each_netdev_continue(d) \ 586 list_for_each_entry_continue(d, &dev_base_head, dev_list) 587 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 588 589 static inline struct net_device *next_net_device(struct net_device *dev) 590 { 591 struct list_head *lh; 592 593 lh = dev->dev_list.next; 594 return lh == &dev_base_head ? NULL : net_device_entry(lh); 595 } 596 597 static inline struct net_device *first_net_device(void) 598 { 599 return list_empty(&dev_base_head) ? NULL : 600 net_device_entry(dev_base_head.next); 601 } 602 603 extern int netdev_boot_setup_check(struct net_device *dev); 604 extern unsigned long netdev_boot_base(const char *prefix, int unit); 605 extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr); 606 extern struct net_device *dev_getfirstbyhwtype(unsigned short type); 607 extern struct net_device *__dev_getfirstbyhwtype(unsigned short type); 608 extern void dev_add_pack(struct packet_type *pt); 609 extern void dev_remove_pack(struct packet_type *pt); 610 extern void __dev_remove_pack(struct packet_type *pt); 611 612 extern struct net_device *dev_get_by_flags(unsigned short flags, 613 unsigned short mask); 614 extern struct net_device *dev_get_by_name(const char *name); 615 extern struct net_device *__dev_get_by_name(const char *name); 616 extern int dev_alloc_name(struct net_device *dev, const char *name); 617 extern int dev_open(struct net_device *dev); 618 extern int dev_close(struct net_device *dev); 619 extern int dev_queue_xmit(struct sk_buff *skb); 620 extern int register_netdevice(struct net_device *dev); 621 extern void unregister_netdevice(struct net_device *dev); 622 extern void free_netdev(struct net_device *dev); 623 extern void synchronize_net(void); 624 extern int register_netdevice_notifier(struct notifier_block *nb); 625 extern int unregister_netdevice_notifier(struct notifier_block *nb); 626 extern int call_netdevice_notifiers(unsigned long val, void *v); 627 extern struct net_device *dev_get_by_index(int ifindex); 628 extern struct net_device *__dev_get_by_index(int ifindex); 629 extern int dev_restart(struct net_device *dev); 630 #ifdef CONFIG_NETPOLL_TRAP 631 extern int netpoll_trap(void); 632 #endif 633 634 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 635 extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); 636 static inline int unregister_gifconf(unsigned int family) 637 { 638 return register_gifconf(family, NULL); 639 } 640 641 /* 642 * Incoming packets are placed on per-cpu queues so that 643 * no locking is needed. 644 */ 645 646 struct softnet_data 647 { 648 struct net_device *output_queue; 649 struct sk_buff_head input_pkt_queue; 650 struct list_head poll_list; 651 struct sk_buff *completion_queue; 652 653 struct net_device backlog_dev; /* Sorry. 8) */ 654 #ifdef CONFIG_NET_DMA 655 struct dma_chan *net_dma; 656 #endif 657 }; 658 659 DECLARE_PER_CPU(struct softnet_data,softnet_data); 660 661 #define HAVE_NETIF_QUEUE 662 663 extern void __netif_schedule(struct net_device *dev); 664 665 static inline void netif_schedule(struct net_device *dev) 666 { 667 if (!test_bit(__LINK_STATE_XOFF, &dev->state)) 668 __netif_schedule(dev); 669 } 670 671 static inline void netif_start_queue(struct net_device *dev) 672 { 673 clear_bit(__LINK_STATE_XOFF, &dev->state); 674 } 675 676 static inline void netif_wake_queue(struct net_device *dev) 677 { 678 #ifdef CONFIG_NETPOLL_TRAP 679 if (netpoll_trap()) { 680 clear_bit(__LINK_STATE_XOFF, &dev->state); 681 return; 682 } 683 #endif 684 if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) 685 __netif_schedule(dev); 686 } 687 688 static inline void netif_stop_queue(struct net_device *dev) 689 { 690 set_bit(__LINK_STATE_XOFF, &dev->state); 691 } 692 693 static inline int netif_queue_stopped(const struct net_device *dev) 694 { 695 return test_bit(__LINK_STATE_XOFF, &dev->state); 696 } 697 698 static inline int netif_running(const struct net_device *dev) 699 { 700 return test_bit(__LINK_STATE_START, &dev->state); 701 } 702 703 704 /* Use this variant when it is known for sure that it 705 * is executing from interrupt context. 706 */ 707 static inline void dev_kfree_skb_irq(struct sk_buff *skb) 708 { 709 if (atomic_dec_and_test(&skb->users)) { 710 struct softnet_data *sd; 711 unsigned long flags; 712 713 local_irq_save(flags); 714 sd = &__get_cpu_var(softnet_data); 715 skb->next = sd->completion_queue; 716 sd->completion_queue = skb; 717 raise_softirq_irqoff(NET_TX_SOFTIRQ); 718 local_irq_restore(flags); 719 } 720 } 721 722 /* Use this variant in places where it could be invoked 723 * either from interrupt or non-interrupt context. 724 */ 725 extern void dev_kfree_skb_any(struct sk_buff *skb); 726 727 #define HAVE_NETIF_RX 1 728 extern int netif_rx(struct sk_buff *skb); 729 extern int netif_rx_ni(struct sk_buff *skb); 730 #define HAVE_NETIF_RECEIVE_SKB 1 731 extern int netif_receive_skb(struct sk_buff *skb); 732 extern int dev_valid_name(const char *name); 733 extern int dev_ioctl(unsigned int cmd, void __user *); 734 extern int dev_ethtool(struct ifreq *); 735 extern unsigned dev_get_flags(const struct net_device *); 736 extern int dev_change_flags(struct net_device *, unsigned); 737 extern int dev_change_name(struct net_device *, char *); 738 extern int dev_set_mtu(struct net_device *, int); 739 extern int dev_set_mac_address(struct net_device *, 740 struct sockaddr *); 741 extern int dev_hard_start_xmit(struct sk_buff *skb, 742 struct net_device *dev); 743 744 extern void dev_init(void); 745 746 extern int netdev_budget; 747 748 /* Called by rtnetlink.c:rtnl_unlock() */ 749 extern void netdev_run_todo(void); 750 751 static inline void dev_put(struct net_device *dev) 752 { 753 atomic_dec(&dev->refcnt); 754 } 755 756 static inline void dev_hold(struct net_device *dev) 757 { 758 atomic_inc(&dev->refcnt); 759 } 760 761 /* Carrier loss detection, dial on demand. The functions netif_carrier_on 762 * and _off may be called from IRQ context, but it is caller 763 * who is responsible for serialization of these calls. 764 * 765 * The name carrier is inappropriate, these functions should really be 766 * called netif_lowerlayer_*() because they represent the state of any 767 * kind of lower layer not just hardware media. 768 */ 769 770 extern void linkwatch_fire_event(struct net_device *dev); 771 772 static inline int netif_carrier_ok(const struct net_device *dev) 773 { 774 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 775 } 776 777 extern void __netdev_watchdog_up(struct net_device *dev); 778 779 extern void netif_carrier_on(struct net_device *dev); 780 781 extern void netif_carrier_off(struct net_device *dev); 782 783 static inline void netif_dormant_on(struct net_device *dev) 784 { 785 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 786 linkwatch_fire_event(dev); 787 } 788 789 static inline void netif_dormant_off(struct net_device *dev) 790 { 791 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 792 linkwatch_fire_event(dev); 793 } 794 795 static inline int netif_dormant(const struct net_device *dev) 796 { 797 return test_bit(__LINK_STATE_DORMANT, &dev->state); 798 } 799 800 801 static inline int netif_oper_up(const struct net_device *dev) { 802 return (dev->operstate == IF_OPER_UP || 803 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 804 } 805 806 /* Hot-plugging. */ 807 static inline int netif_device_present(struct net_device *dev) 808 { 809 return test_bit(__LINK_STATE_PRESENT, &dev->state); 810 } 811 812 extern void netif_device_detach(struct net_device *dev); 813 814 extern void netif_device_attach(struct net_device *dev); 815 816 /* 817 * Network interface message level settings 818 */ 819 #define HAVE_NETIF_MSG 1 820 821 enum { 822 NETIF_MSG_DRV = 0x0001, 823 NETIF_MSG_PROBE = 0x0002, 824 NETIF_MSG_LINK = 0x0004, 825 NETIF_MSG_TIMER = 0x0008, 826 NETIF_MSG_IFDOWN = 0x0010, 827 NETIF_MSG_IFUP = 0x0020, 828 NETIF_MSG_RX_ERR = 0x0040, 829 NETIF_MSG_TX_ERR = 0x0080, 830 NETIF_MSG_TX_QUEUED = 0x0100, 831 NETIF_MSG_INTR = 0x0200, 832 NETIF_MSG_TX_DONE = 0x0400, 833 NETIF_MSG_RX_STATUS = 0x0800, 834 NETIF_MSG_PKTDATA = 0x1000, 835 NETIF_MSG_HW = 0x2000, 836 NETIF_MSG_WOL = 0x4000, 837 }; 838 839 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 840 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 841 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 842 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 843 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 844 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 845 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 846 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 847 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 848 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 849 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 850 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 851 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 852 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 853 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 854 855 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 856 { 857 /* use default */ 858 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 859 return default_msg_enable_bits; 860 if (debug_value == 0) /* no output */ 861 return 0; 862 /* set low N bits */ 863 return (1 << debug_value) - 1; 864 } 865 866 /* Test if receive needs to be scheduled */ 867 static inline int __netif_rx_schedule_prep(struct net_device *dev) 868 { 869 return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); 870 } 871 872 /* Test if receive needs to be scheduled but only if up */ 873 static inline int netif_rx_schedule_prep(struct net_device *dev) 874 { 875 return netif_running(dev) && __netif_rx_schedule_prep(dev); 876 } 877 878 /* Add interface to tail of rx poll list. This assumes that _prep has 879 * already been called and returned 1. 880 */ 881 882 extern void __netif_rx_schedule(struct net_device *dev); 883 884 /* Try to reschedule poll. Called by irq handler. */ 885 886 static inline void netif_rx_schedule(struct net_device *dev) 887 { 888 if (netif_rx_schedule_prep(dev)) 889 __netif_rx_schedule(dev); 890 } 891 892 /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). 893 * Do not inline this? 894 */ 895 static inline int netif_rx_reschedule(struct net_device *dev, int undo) 896 { 897 if (netif_rx_schedule_prep(dev)) { 898 unsigned long flags; 899 900 dev->quota += undo; 901 902 local_irq_save(flags); 903 list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); 904 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 905 local_irq_restore(flags); 906 return 1; 907 } 908 return 0; 909 } 910 911 /* Remove interface from poll list: it must be in the poll list 912 * on current cpu. This primitive is called by dev->poll(), when 913 * it completes the work. The device cannot be out of poll list at this 914 * moment, it is BUG(). 915 */ 916 static inline void netif_rx_complete(struct net_device *dev) 917 { 918 unsigned long flags; 919 920 local_irq_save(flags); 921 BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); 922 list_del(&dev->poll_list); 923 smp_mb__before_clear_bit(); 924 clear_bit(__LINK_STATE_RX_SCHED, &dev->state); 925 local_irq_restore(flags); 926 } 927 928 static inline void netif_poll_disable(struct net_device *dev) 929 { 930 while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) 931 /* No hurry. */ 932 schedule_timeout_interruptible(1); 933 } 934 935 static inline void netif_poll_enable(struct net_device *dev) 936 { 937 smp_mb__before_clear_bit(); 938 clear_bit(__LINK_STATE_RX_SCHED, &dev->state); 939 } 940 941 /* same as netif_rx_complete, except that local_irq_save(flags) 942 * has already been issued 943 */ 944 static inline void __netif_rx_complete(struct net_device *dev) 945 { 946 BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); 947 list_del(&dev->poll_list); 948 smp_mb__before_clear_bit(); 949 clear_bit(__LINK_STATE_RX_SCHED, &dev->state); 950 } 951 952 static inline void netif_tx_lock(struct net_device *dev) 953 { 954 spin_lock(&dev->_xmit_lock); 955 dev->xmit_lock_owner = smp_processor_id(); 956 } 957 958 static inline void netif_tx_lock_bh(struct net_device *dev) 959 { 960 spin_lock_bh(&dev->_xmit_lock); 961 dev->xmit_lock_owner = smp_processor_id(); 962 } 963 964 static inline int netif_tx_trylock(struct net_device *dev) 965 { 966 int ok = spin_trylock(&dev->_xmit_lock); 967 if (likely(ok)) 968 dev->xmit_lock_owner = smp_processor_id(); 969 return ok; 970 } 971 972 static inline void netif_tx_unlock(struct net_device *dev) 973 { 974 dev->xmit_lock_owner = -1; 975 spin_unlock(&dev->_xmit_lock); 976 } 977 978 static inline void netif_tx_unlock_bh(struct net_device *dev) 979 { 980 dev->xmit_lock_owner = -1; 981 spin_unlock_bh(&dev->_xmit_lock); 982 } 983 984 static inline void netif_tx_disable(struct net_device *dev) 985 { 986 netif_tx_lock_bh(dev); 987 netif_stop_queue(dev); 988 netif_tx_unlock_bh(dev); 989 } 990 991 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 992 993 extern void ether_setup(struct net_device *dev); 994 995 /* Support for loadable net-drivers */ 996 extern struct net_device *alloc_netdev(int sizeof_priv, const char *name, 997 void (*setup)(struct net_device *)); 998 extern int register_netdev(struct net_device *dev); 999 extern void unregister_netdev(struct net_device *dev); 1000 /* Functions used for multicast support */ 1001 extern void dev_mc_upload(struct net_device *dev); 1002 extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); 1003 extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); 1004 extern void dev_mc_discard(struct net_device *dev); 1005 extern void dev_set_promiscuity(struct net_device *dev, int inc); 1006 extern void dev_set_allmulti(struct net_device *dev, int inc); 1007 extern void netdev_state_change(struct net_device *dev); 1008 extern void netdev_features_change(struct net_device *dev); 1009 /* Load a device via the kmod */ 1010 extern void dev_load(const char *name); 1011 extern void dev_mcast_init(void); 1012 extern int netdev_max_backlog; 1013 extern int weight_p; 1014 extern int netdev_set_master(struct net_device *dev, struct net_device *master); 1015 extern int skb_checksum_help(struct sk_buff *skb); 1016 extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); 1017 #ifdef CONFIG_BUG 1018 extern void netdev_rx_csum_fault(struct net_device *dev); 1019 #else 1020 static inline void netdev_rx_csum_fault(struct net_device *dev) 1021 { 1022 } 1023 #endif 1024 /* rx skb timestamps */ 1025 extern void net_enable_timestamp(void); 1026 extern void net_disable_timestamp(void); 1027 1028 #ifdef CONFIG_PROC_FS 1029 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); 1030 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); 1031 extern void dev_seq_stop(struct seq_file *seq, void *v); 1032 #endif 1033 1034 extern void linkwatch_run_queue(void); 1035 1036 static inline int net_gso_ok(int features, int gso_type) 1037 { 1038 int feature = gso_type << NETIF_F_GSO_SHIFT; 1039 return (features & feature) == feature; 1040 } 1041 1042 static inline int skb_gso_ok(struct sk_buff *skb, int features) 1043 { 1044 return net_gso_ok(features, skb_shinfo(skb)->gso_type); 1045 } 1046 1047 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 1048 { 1049 return skb_is_gso(skb) && 1050 (!skb_gso_ok(skb, dev->features) || 1051 unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); 1052 } 1053 1054 /* On bonding slaves other than the currently active slave, suppress 1055 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and 1056 * ARP on active-backup slaves with arp_validate enabled. 1057 */ 1058 static inline int skb_bond_should_drop(struct sk_buff *skb) 1059 { 1060 struct net_device *dev = skb->dev; 1061 struct net_device *master = dev->master; 1062 1063 if (master && 1064 (dev->priv_flags & IFF_SLAVE_INACTIVE)) { 1065 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && 1066 skb->protocol == __constant_htons(ETH_P_ARP)) 1067 return 0; 1068 1069 if (master->priv_flags & IFF_MASTER_ALB) { 1070 if (skb->pkt_type != PACKET_BROADCAST && 1071 skb->pkt_type != PACKET_MULTICAST) 1072 return 0; 1073 } 1074 if (master->priv_flags & IFF_MASTER_8023AD && 1075 skb->protocol == __constant_htons(ETH_P_SLOW)) 1076 return 0; 1077 1078 return 1; 1079 } 1080 return 0; 1081 } 1082 1083 #endif /* __KERNEL__ */ 1084 1085 #endif /* _LINUX_DEV_H */ 1086