
Searched refs: net_hotdata (Results 1 – 25 of 26) sorted by relevance


/linux-6.15/include/net/
hotdata.h
    10   struct net_hotdata {
    45   #define inet_ehash_secret net_hotdata.tcp_protocol.secret
    46   #define udp_ehash_secret net_hotdata.udp_protocol.secret
    47   #define inet6_ehash_secret net_hotdata.tcpv6_protocol.secret
    48   #define tcp_ipv6_hash_secret net_hotdata.tcpv6_offload.secret
    49   #define udp6_ehash_secret net_hotdata.udpv6_protocol.secret
    50   #define udp_ipv6_hash_secret net_hotdata.udpv6_offload.secret
    52   extern struct net_hotdata net_hotdata;
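For orientation, here is a condensed sketch of struct net_hotdata assembled only from the fields that appear in these results; the real definition in include/net/hotdata.h has additional members, config guards and a deliberate cache-line layout.

    /* Condensed sketch: members and types inferred from the hits on this
     * page only; not a verbatim copy of include/net/hotdata.h. */
    struct net_hotdata {
        struct packet_offload  ip_packet_offload;    /* af_inet.c */
        struct net_offload     tcpv4_offload;        /* tcp_offload.c */
        struct net_protocol    tcp_protocol;         /* af_inet.c */
        struct net_offload     udpv4_offload;        /* udp_offload.c (ipv4) */
        struct net_protocol    udp_protocol;         /* af_inet.c */
        struct packet_offload  ipv6_packet_offload;  /* ip6_offload.c */
        struct net_offload     tcpv6_offload;        /* tcpv6_offload.c */
        struct inet6_protocol  tcpv6_protocol;       /* tcp_ipv6.c */
        struct inet6_protocol  udpv6_protocol;       /* udp.c */
        struct net_offload     udpv6_offload;        /* udp_offload.c (ipv6) */
        struct list_head       offload_base;         /* gro.c, gso.c */
        struct kmem_cache      *skbuff_cache;        /* skbuff.c, xdp.c */
        struct kmem_cache      *skbuff_fclone_cache; /* skbuff.c */
        struct kmem_cache      *skb_small_head_cache;/* skbuff.c */
        struct rps_sock_flow_table __rcu *rps_sock_flow_table; /* rps.h */
        u32  rps_cpu_mask;          /* rps.h, dev.c */
        int  gro_normal_batch;      /* gro.h */
        int  netdev_budget;         /* sysctl_net_core.c */
        int  netdev_budget_usecs;   /* dev.c */
        int  tstamp_prequeue;       /* dev.c */
        int  max_backlog;           /* dev.c, gro_cells.c, xfrm */
        int  dev_tx_weight;         /* sch_generic.c */
        int  dev_rx_weight;         /* dev.c */
        int  sysctl_max_skb_frags;  /* mptcp/protocol.c */
        int  sysctl_mem_pcpu_rsv;   /* proto_memory.h */
    };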
proto_memory.h
    68   if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))  in sk_memory_allocated_add()
    79   if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))  in sk_memory_allocated_sub()
rps.h
    72   u32 val = hash & ~net_hotdata.rps_cpu_mask;  in rps_record_sock_flow()
    94   sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);  in sock_rps_record_flow_hash()
gro.h
    545  if (gro->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))  in gro_normal_one()
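The proto_memory.h, rps.h and gro.h hits share one pattern: sysctl writers store plain values into net_hotdata and datapath readers sample them with READ_ONCE(), taking no lock. A minimal sketch of that read side, using gro_normal_batch as in gro_normal_one() above; the wrapper function is illustrative, not something from gro.h.

    /* Illustrative lockless read of a tunable net_hotdata field. */
    #include <net/hotdata.h>

    static inline bool gro_batch_full_sketch(unsigned int queued)
    {
        return queued >= READ_ONCE(net_hotdata.gro_normal_batch);
    }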
/linux-6.15/net/core/
hotdata.c
    8    struct net_hotdata net_hotdata __cacheline_aligned = {
    9    .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
    24   EXPORT_SYMBOL(net_hotdata);
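net/core/hotdata.c holds the single global instance: one cache-line-aligned object, initialized at build time and exported so modules can reference it. A trimmed sketch following the three hits above; the real initializer also sets the integer defaults behind the sysctls in the next block.

    /* Trimmed sketch of net/core/hotdata.c based on the hits above. */
    #include <linux/export.h>
    #include <linux/list.h>
    #include <net/hotdata.h>

    struct net_hotdata net_hotdata __cacheline_aligned = {
        .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
        /* ... integer defaults elided ... */
    };
    EXPORT_SYMBOL(net_hotdata);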
sysctl_net_core.c
    162  net_hotdata.rps_sock_flow_table,  in rps_sock_flow_sysctl()
    183  net_hotdata.rps_cpu_mask =  in rps_sock_flow_sysctl()
    195  rcu_assign_pointer(net_hotdata.rps_sock_flow_table,  in rps_sock_flow_sysctl()
    404  .data = &net_hotdata.sysctl_mem_pcpu_rsv,
    436  .data = &net_hotdata.max_backlog,
    495  .data = &net_hotdata.tstamp_prequeue,
    564  .data = &net_hotdata.netdev_budget,
    578  .data = &net_hotdata.sysctl_max_skb_frags,
    587  .data = &net_hotdata.netdev_budget_usecs,
    620  .data = &net_hotdata.gro_normal_batch,
    [all …]
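Each ".data = &net_hotdata.<field>" hit above is a ctl_table entry wiring a /proc/sys/net/core knob directly to the hot struct. A sketch of what one such entry plausibly looks like: the .data pointer is taken from the hit at line 436, while the procname, bounds and handler are illustrative rather than copied from sysctl_net_core.c.

    /* Illustrative ctl_table entry; only .data is taken from the hits above. */
    #include <linux/sysctl.h>
    #include <net/hotdata.h>

    static struct ctl_table netdev_max_backlog_sketch = {
        .procname     = "netdev_max_backlog",
        .data         = &net_hotdata.max_backlog,
        .maxlen       = sizeof(int),
        .mode         = 0644,
        .proc_handler = proc_dointvec,
    };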
gso.c
    20   list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {  in skb_eth_gso_segment()
    51   list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {  in skb_mac_gso_segment()
gro.c
    29   list_for_each_entry(elem, &net_hotdata.offload_base, list) {  in dev_add_offload()
    53   struct list_head *head = &net_hotdata.offload_base;  in __dev_remove_offload()
    255  struct list_head *head = &net_hotdata.offload_base;  in gro_complete()
    464  struct list_head *head = &net_hotdata.offload_base;  in dev_gro_receive()
    570  struct list_head *offload_head = &net_hotdata.offload_base;  in gro_find_receive_by_type()
    584  struct list_head *offload_head = &net_hotdata.offload_base;  in gro_find_complete_by_type()
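gso.c and gro.c treat net_hotdata.offload_base as the single registry of L3 packet offloads: dev_add_offload()/__dev_remove_offload() mutate it, and the GRO/GSO receive paths walk it under RCU. The lookup pattern, sketched after the gro_find_receive_by_type() hit; the shape is inferred from the hits rather than copied, using the standard packet_offload member names.

    /* Sketch of the RCU walk over net_hotdata.offload_base. */
    #include <linux/netdevice.h>   /* struct packet_offload */
    #include <linux/rculist.h>
    #include <net/hotdata.h>

    static struct packet_offload *find_gro_receive_sketch(__be16 type)
    {
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
            if (ptype->type == type && ptype->callbacks.gro_receive)
                return ptype;
        }
        return NULL;
    }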
skbuff.c
    333   n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,  in napi_skb_cache_get_bulk()
    345   u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache);  in napi_skb_cache_get_bulk()
    413   skb = kmem_cache_alloc(net_hotdata.skbuff_cache,  in slab_build_skb()
    465   skb = kmem_cache_alloc(net_hotdata.skbuff_cache,  in __build_skb()
    648   ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;  in __alloc_skb()
    1056  kmem_cache_free(net_hotdata.skb_small_head_cache, head);  in skb_kfree_head()
    1120  kmem_cache_free(net_hotdata.skbuff_cache, skb);  in kfree_skbmem()
    1141  kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);  in kfree_skbmem()
    1441  kmem_cache_size(net_hotdata.skbuff_cache));  in napi_skb_cache_put()
    6021  kmem_cache_free(net_hotdata.skbuff_cache, skb);  in kfree_skb_partial()
    [all …]
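skbuff.c keeps its three slab caches (skbuff_cache, skbuff_fclone_cache, skb_small_head_cache) in net_hotdata so that allocation and free paths touch the same hot cache line. The basic alloc/free pairing seen in the __build_skb() and kfree_skbmem() hits, sketched with error handling trimmed.

    /* Sketch of allocating and freeing an sk_buff head from the hot cache. */
    #include <linux/skbuff.h>
    #include <linux/slab.h>
    #include <net/hotdata.h>

    static struct sk_buff *skb_head_alloc_sketch(gfp_t gfp_mask)
    {
        return kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
    }

    static void skb_head_free_sketch(struct sk_buff *skb)
    {
        kmem_cache_free(net_hotdata.skbuff_cache, skb);
    }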
gro_cells.c
    30    if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {  in gro_cells_receive()
dev.c
    4852  if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)  in get_rps_cpu()
    4855  next_cpu = ident & net_hotdata.rps_cpu_mask;  in get_rps_cpu()
    5026  if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))  in skb_flow_limit()
    5077  max_backlog = READ_ONCE(net_hotdata.max_backlog);  in enqueue_to_backlog()
    5378  net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);  in netif_rx_internal()
    5670  net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);  in __netif_receive_skb_core()
    6068  net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);  in netif_receive_skb_internal()
    6097  net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),  in netif_receive_skb_list_internal()
    6344  napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);  in process_backlog()
    7475  usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));  in net_rx_action()
    [all …]
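dev.c touches two groups of fields: rps_cpu_mask/rps_sock_flow_table for flow steering in get_rps_cpu(), and max_backlog/tstamp_prequeue/dev_rx_weight/netdev_budget_usecs for the backlog and softirq budget. The backlog admission test from the enqueue_to_backlog() and skb_flow_limit() hits, sketched with queueing, flow limits and drop accounting elided.

    /* Simplified sketch of the backlog admission check. */
    #include <linux/netdevice.h>   /* struct softnet_data */
    #include <linux/skbuff.h>
    #include <net/hotdata.h>

    static bool backlog_has_room_sketch(const struct softnet_data *sd)
    {
        return skb_queue_len(&sd->input_pkt_queue) <
               (unsigned int)READ_ONCE(net_hotdata.max_backlog);
    }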
xdp.c
    849  skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);  in xdp_build_skb_from_frame()
/linux-6.15/net/ipv6/
udp_offload.c
    192   net_hotdata.udpv6_offload = (struct net_offload) {  in udpv6_offload_init()
    199   return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);  in udpv6_offload_init()
    204   return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);  in udpv6_offload_exit()
tcpv6_offload.c
    198   net_hotdata.tcpv6_offload = (struct net_offload) {  in tcpv6_offload_init()
    205   return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);  in tcpv6_offload_init()
ip6_offload.c
    470   net_hotdata.ipv6_packet_offload = (struct packet_offload) {  in ipv6_offload_init()
    478   dev_add_offload(&net_hotdata.ipv6_packet_offload);  in ipv6_offload_init()
udp.c
    1949  net_hotdata.udpv6_protocol = (struct inet6_protocol) {  in udpv6_init()
    1954  ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);  in udpv6_init()
    1965  inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);  in udpv6_init()
    1972  inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);  in udpv6_exit()
tcp_ipv6.c
    2413  net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
    2418  ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    2443  inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    2451  inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
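The IPv6 init paths all follow the same shape: fill the inet6_protocol or net_offload slot embedded in net_hotdata with a compound literal, then register a pointer to that slot. Sketched after the tcp_ipv6.c hits, as it would appear inside tcp_ipv6.c where tcp_v6_rcv()/tcp_v6_err() are in scope; treat the member list as approximate rather than a verbatim copy.

    /* Registration pattern from the tcp_ipv6.c hits; members approximate. */
    static int tcpv6_register_sketch(void)
    {
        net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
            .handler     = tcp_v6_rcv,
            .err_handler = tcp_v6_err,
            .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
        };
        return inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    }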
/linux-6.15/net/ipv4/
af_inet.c
    1870  net_hotdata.ip_packet_offload = (struct packet_offload) {  in ipv4_offload_init()
    1878  dev_add_offload(&net_hotdata.ip_packet_offload);  in ipv4_offload_init()
    1933  net_hotdata.udp_protocol = (struct net_protocol) {  in inet_init()
    1938  if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)  in inet_init()
    1941  net_hotdata.tcp_protocol = (struct net_protocol) {  in inet_init()
    1947  if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)  in inet_init()
tcp_offload.c
    496   net_hotdata.tcpv4_offload = (struct net_offload) {  in tcpv4_offload_init()
    503   return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);  in tcpv4_offload_init()
udp_offload.c
    822   net_hotdata.udpv4_offload = (struct net_offload) {  in udpv4_offload_init()
    829   return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);  in udpv4_offload_init()
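The IPv4 offload inits use the same shape with net_offload and the GSO/GRO callbacks. Sketched after the tcp_offload.c hits, as it would appear inside tcp_offload.c; the callback names are the usual tcp4_* helpers defined there, and should be read as approximate.

    /* Offload registration pattern from the tcp_offload.c hits; approximate. */
    static int tcpv4_offload_register_sketch(void)
    {
        net_hotdata.tcpv4_offload = (struct net_offload) {
            .callbacks = {
                .gso_segment  = tcp4_gso_segment,
                .gro_receive  = tcp4_gro_receive,
                .gro_complete = tcp4_gro_complete,
            },
        };
        return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
    }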
/linux-6.15/net/xfrm/
espintcp.c
    174  READ_ONCE(net_hotdata.max_backlog)) {  in espintcp_queue_out()
xfrm_input.c
    800  if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))  in xfrm_trans_queue_net()
/linux-6.15/net/sched/
sch_generic.c
    413  int quota = READ_ONCE(net_hotdata.dev_tx_weight);  in __qdisc_run()
/linux-6.15/net/bpf/
test_run.c
    260  n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,  in xdp_recv_frames()
/linux-6.15/net/mptcp/
protocol.c
    1193  if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {  in mptcp_sendmsg_frag()
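Outside net/core, most consumers (gro_cells.c, espintcp.c, xfrm_input.c, mptcp/protocol.c) simply reuse a tunable as a bound: max_backlog as a per-queue cap or sysctl_max_skb_frags as a frag-count limit. The recurring queue-cap idiom, sketched with an illustrative queue and error path.

    /* The cap-against-max_backlog idiom seen in the hits above; illustrative. */
    #include <linux/skbuff.h>
    #include <net/hotdata.h>

    static int enqueue_capped_sketch(struct sk_buff_head *q, struct sk_buff *skb)
    {
        if (skb_queue_len(q) >= (unsigned int)READ_ONCE(net_hotdata.max_backlog)) {
            kfree_skb(skb);
            return -ENOBUFS;
        }
        /* Locked variant; the callers above use the unlocked one under
         * their own serialization. */
        skb_queue_tail(q, skb);
        return 0;
    }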
