// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized.
	 * Otherwise, the memory outside of 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

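/* Illustrative only (not part of the original file): assuming 8-byte
 * longs, a mask whose range covers key bytes [16, 32) makes the loop
 * above copy exactly two masked longs, i.e. roughly:
 *
 *	dst_longs[2] = src_longs[2] & mask_longs[2];
 *	dst_longs[3] = src_longs[3] & mask_longs[3];
 *
 * Bytes outside the range stay uninitialized unless 'full' was set.
 */
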
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_cntr);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per-CPU counters are not atomic, we cannot simply reset
	 * them from another CPU. To still provide an approximately
	 * zero-based counter, we store the value at reset time and
	 * subtract it later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

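/* Illustrative only: if the CPU-summed usage for mask i is 700 at the
 * time of the reset above, masks_usage_zero_cntr[i] becomes 700; a
 * later reading of 950 is then reported as 950 - 700 = 250 hits since
 * the reset, without ever writing to another CPU's counter.
 */
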
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
					       __alignof__(u64));
	if (!new->masks_usage_cntr) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

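/* Illustrative only: with ma->max == 48, a delete that runs while the
 * count is 16 (16 <= 48 / 3) halves the array to 24 slots; at a count
 * of 17 it would not. The gap between the 1/3 trigger and the 1/2
 * target provides hysteresis, so churn around one boundary does not
 * reallocate the array on every add/delete.
 */
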
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow 'size' to be zero or a power of two, and make sure
	 * it does not exceed the per-cpu allocation unit size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

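/* Illustrative only: with the default of MC_DEFAULT_HASH_ENTRIES (256)
 * entries, each CPU holds 256 * sizeof(struct mask_cache_entry) bytes
 * of cache. Calling ovs_flow_tbl_masks_cache_resize(table, 512) swaps
 * in a fresh per-cpu cache under RCU (existing entries are simply
 * lost), while a size of 0 disables mask caching entirely.
 */
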
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	if (ti->keep_flows)
		return;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

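/* Illustrative note: each sw_flow carries two hlist_node slots per
 * table ('node[0]' and 'node[1]'). During a rehash, the copy below
 * links every flow into the new instance under the flipped node_ver,
 * so a flow is reachable from both instances until the old one is
 * freed after an RCU grace period; concurrent lookups never see a gap.
 */
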
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

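/* Illustrative only: a mask range covering key bytes [8, 40) makes
 * flow_hash() below run jhash2() over (40 - 8) / 4 = 8 u32 words
 * starting at byte 8. Bytes the mask never sets are excluded from the
 * hash, so all packets matching the same masked flow hash identically.
 */
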
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure the number of hash bytes is a multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup on the flow table. It starts with the
 * mask at the index passed in via '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&ma->syncp);
			usage_counters[*index]++;
			u64_stats_update_end(&ma->syncp);
			return flow;
		}
	}

	return NULL;
}

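/* Illustrative only: with the default cache_size of 256, the index
 * mask below is 0xff and MC_HASH_SEGS = 32 / 8 = 4. A 32-bit skb_hash
 * of 0xdeadbeef therefore probes per-cpu slots 0xef, 0xbe, 0xad and
 * 0xde in turn, consuming MC_HASH_SHIFT (8) bits per segment, before
 * falling back to a full mask-array lookup.
 */
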
/* mask_cache maps a flow to a probable mask. The cache is not tightly
 * coupled to the mask array; updates to the mask list can leave stale
 * entries behind in the cache.
 * The cache is per-cpu and is divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;	/* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
}

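/* Illustrative note on the three lookup flavours in this file:
 * ovs_flow_tbl_lookup_stats() is the per-packet fast path and consults
 * the per-cpu mask cache; ovs_flow_tbl_lookup() skips the cache and
 * walks the mask array directly; ovs_flow_tbl_lookup_exact() below is
 * the slow control-path variant, which additionally requires an
 * unmasked-key match.
 */
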
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

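/* Illustrative only: an instance that starts at TBL_MIN_BUCKETS (1024)
 * doubles to 2048 buckets once it holds more flows than buckets, i.e.
 * the table expands as soon as the average chain length would exceed
 * one entry per bucket.
 */
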
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

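/* Illustrative note: besides growing on demand, flow_key_insert()
 * above also rehashes at the same size once every REHASH_INTERVAL
 * (10 minutes). Because table_instance_alloc() draws a fresh random
 * hash_seed, the periodic rehash redistributes the buckets and limits
 * the damage of any deliberately colliding traffic pattern.
 */
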
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		unsigned int start;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

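/* Illustrative only: given three masks whose usage counts since the
 * last rebalance are A = 10, B = 500 and C = 30, the descending sort
 * above rebuilds the array in the order B, C, A, so flow_lookup()
 * tries the hottest mask first and the average number of masked
 * lookups per packet drops.
 */
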
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}

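/* Illustrative lifecycle sketch (not part of the original file), based
 * only on the functions above: ovs_flow_init() creates the slab caches
 * once at module load; each datapath then calls ovs_flow_tbl_init(),
 * adds and looks up flows via ovs_flow_tbl_insert() and
 * ovs_flow_tbl_lookup_stats(), and tears the table down with
 * ovs_flow_tbl_destroy(); ovs_flow_exit() destroys the caches last.
 */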