/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 01 - internal entry
 * 10 - exceptional entry
 * 11 - this bit combination is currently unused/reserved
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that you cannot store NULL in the tree as a value for the index.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	1UL

/*
 * Most users of the radix tree store pointers but shmem/tmpfs stores swap
 * entries in the same tree.  They are marked as exceptional entries to
 * distinguish them from pointers to struct page.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}
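
/*
 * Illustrative sketch (not part of this header): how a caller such as shmem
 * might pack a small integer value into an exceptional entry and unpack it
 * again, following the bit layout documented above.  The helper names are
 * hypothetical.
 *
 *	static inline void *my_make_exceptional(unsigned long val)
 *	{
 *		return (void *)((val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *				RADIX_TREE_EXCEPTIONAL_ENTRY);
 *	}
 *
 *	static inline unsigned long my_unpack_exceptional(void *entry)
 *	{
 *		return (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 *	}
 */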

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is an exceptional entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @exceptional is the count of every element in ->slots which is
 * either radix_tree_exceptional_entry() or is a sibling entry for an
 * exceptional entry.
 */
struct radix_tree_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	exceptional;	/* Exceptional entry count */
	struct radix_tree_node *parent;	/* Used when ascending tree */
	struct radix_tree_root *root;	/* The tree we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
#define ROOT_IS_IDR	((__force gfp_t)(1 << __GFP_BITS_SHIFT))
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT + 1)

struct radix_tree_root {
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)

static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->rnode == NULL;
}
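
/*
 * Illustrative sketch (not part of this header): the two ways to obtain an
 * initialized, empty tree.  The names my_tree and my_object are hypothetical.
 *
 *	static RADIX_TREE(my_tree, GFP_KERNEL);		// compile time
 *
 *	struct my_object {
 *		struct radix_tree_root tree;
 *	};
 *
 *	static void my_object_init(struct my_object *obj)
 *	{
 *		INIT_RADIX_TREE(&obj->tree, GFP_KERNEL);	// run time
 *	}
 */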

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 * @shift:	shift for the node that holds our slots
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned int	shift;
#endif
};

static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	return iter->shift;
#else
	return 0;
#endif
}

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronisation (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU.  The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions.  Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */

/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference.  Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
						    spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg: pointer returned by radix_tree_deref_slot
 * Returns: 0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}
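
/*
 * Illustrative sketch (not part of this header): a lockless lookup under
 * rcu_read_lock() using the deref/retry protocol described above.  The
 * function name is hypothetical.
 *
 *	static void *my_rcu_lookup(struct radix_tree_root *root,
 *				   unsigned long index)
 *	{
 *		void __rcu **slot;
 *		void *entry;
 *
 *		rcu_read_lock();
 *	repeat:
 *		slot = radix_tree_lookup_slot(root, index);
 *		entry = slot ? radix_tree_deref_slot(slot) : NULL;
 *		if (radix_tree_deref_retry(entry))
 *			goto repeat;	// slot moved; restart the lookup
 *		rcu_read_unlock();
 *		return entry;	// caller must manage the item's lifetime
 *	}
 */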

/**
 * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
 * @arg: value returned by radix_tree_deref_slot
 * Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg: value returned by radix_tree_deref_slot
 * Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

int __radix_tree_create(struct radix_tree_root *, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void __rcu ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry,
			  radix_tree_update_node_t update_node);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void __radix_tree_delete_node(struct radix_tree_root *,
			      struct radix_tree_node *,
			      radix_tree_update_node_t update_node);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
			   void __rcu **slot);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
			void __rcu ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_set(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
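
/*
 * Illustrative sketch (not part of this header): the usual preload pattern
 * for inserting under a spinlock.  radix_tree_preload() returns with
 * preemption disabled on success, so radix_tree_preload_end() must follow on
 * the success path.  The lock, tree and item names are hypothetical.
 *
 *	static int my_insert(struct radix_tree_root *root, spinlock_t *lock,
 *			     unsigned long index, void *item)
 *	{
 *		int err;
 *
 *		err = radix_tree_preload(GFP_KERNEL);	// may sleep
 *		if (err)
 *			return err;	// preemption not disabled on error
 *
 *		spin_lock(lock);
 *		err = radix_tree_insert(root, index, item);
 *		if (!err)
 *			radix_tree_tag_set(root, index, 0);	// tag 0 of 3
 *		spin_unlock(lock);
 *
 *		radix_tree_preload_end();	// re-enables preemption
 *		return err;
 *	}
 */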

int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
int radix_tree_split(struct radix_tree_root *, unsigned long index,
			unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
			unsigned new_order, void *);

void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);
static inline void __rcu **idr_get_free(struct radix_tree_root *root,
					struct radix_tree_iter *iter,
					gfp_t gfp, int end)
{
	return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
}

static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
					    struct radix_tree_iter *iter,
					    gfp_t gfp, unsigned long end)
{
	return idr_get_free_cmn(root, iter, gfp, end - 1);
}

enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized.  radix_tree_next_chunk() will fill
	 * it in the case of a successful tagged chunk lookup.  If the lookup
	 * was unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * Also it fills @iter with data about the chunk: position in the tree
 * (index), its end (next_index), and constructs a bit mask for tagged
 * iterating (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}

/**
 * radix_tree_iter_find - find a present entry
 * @root: radix tree root
 * @iter: iterator state
 * @index: start location
 *
 * This function returns the slot containing the entry with the lowest index
 * which is at least @index.  If @index is larger than any present entry, this
 * function returns NULL.  The @iter is updated to describe the entry found.
 */
static inline void __rcu **
radix_tree_iter_find(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, 0);
}
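
/*
 * Illustrative sketch (not part of this header): using radix_tree_iter_find()
 * to locate the first present entry at or above an index.  Must be called
 * with the tree appropriately locked or under rcu_read_lock(); the function
 * name is hypothetical.
 *
 *	static void *my_find_from(const struct radix_tree_root *root,
 *				  unsigned long start, unsigned long *indexp)
 *	{
 *		struct radix_tree_iter iter;
 *		void __rcu **slot;
 *
 *		slot = radix_tree_iter_find(root, &iter, start);
 *		if (!slot)
 *			return NULL;
 *		*indexp = iter.index;	// index where the entry was found
 *		return radix_tree_deref_slot(slot);
 *	}
 */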

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + (slots << iter_shift(iter));
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot:	pointer to current slot
 * @iter:	iterator state
 * Returns:	New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter);

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return (iter->next_index - iter->index) >> iter_shift(iter);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags);
#else
/* Can't happen without sibling entries, but the compiler can't tell that */
static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	return slot;
}
#endif

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
		return __radix_tree_next_slot(slot, iter, flags);
	return slot;
}
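
/*
 * Illustrative sketch (not part of this header): a locked iteration that
 * periodically drops the lock, using radix_tree_iter_resume() together with
 * the radix_tree_for_each_slot() macro defined just below.  The lock and
 * callback names are hypothetical.
 *
 *	static void my_walk(struct radix_tree_root *root, spinlock_t *lock)
 *	{
 *		struct radix_tree_iter iter;
 *		void __rcu **slot;
 *
 *		spin_lock(lock);
 *		radix_tree_for_each_slot(slot, root, &iter, 0) {
 *			my_handle(radix_tree_deref_slot_protected(slot, lock));
 *			if (need_resched()) {
 *				slot = radix_tree_iter_resume(slot, &iter);
 *				cond_resched_lock(lock);
 *			}
 *		}
 *		spin_unlock(lock);
 *	}
 */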

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))

#endif /* _LINUX_RADIX_TREE_H */