/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H

/*
 * Nodemasks provide a bitmap suitable for representing the
 * set of Nodes in a system, one bit position per Node number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these nodemasks are based.
 *
 * For details of nodemask_parse_user(), see bitmap_parse_user() in
 * lib/bitmap.c.  For details of nodelist_parse(), see bitmap_parselist(),
 * also in lib/bitmap.c.  For details of node_remap(), see bitmap_bitremap()
 * in lib/bitmap.c.  For details of nodes_remap(), see bitmap_remap() in
 * lib/bitmap.c.  For details of nodes_onto(), see bitmap_onto() in
 * lib/bitmap.c.  For details of nodes_fold(), see bitmap_fold() in
 * lib/bitmap.c.
 *
 * The available nodemask operations are:
 *
 * void node_set(node, mask)            turn on bit 'node' in mask
 * void node_clear(node, mask)          turn off bit 'node' in mask
 * void nodes_setall(mask)              set all bits
 * void nodes_clear(mask)               clear all bits
 * int node_isset(node, mask)           true iff bit 'node' set in mask
 * int node_test_and_set(node, mask)    test and set bit 'node' in mask
 *
 * void nodes_and(dst, src1, src2)      dst = src1 & src2  [intersection]
 * void nodes_or(dst, src1, src2)       dst = src1 | src2  [union]
 * void nodes_xor(dst, src1, src2)      dst = src1 ^ src2
 * void nodes_andnot(dst, src1, src2)   dst = src1 & ~src2
 * void nodes_complement(dst, src)      dst = ~src
 *
 * int nodes_equal(mask1, mask2)        Does mask1 == mask2?
 * int nodes_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
 * int nodes_subset(mask1, mask2)       Is mask1 a subset of mask2?
 * int nodes_empty(mask)                Is mask empty (no bits set)?
 * int nodes_full(mask)                 Is mask full (all bits set)?
 * int nodes_weight(mask)               Hamming weight - number of set bits
 *
 * void nodes_shift_right(dst, src, n)  Shift right
 * void nodes_shift_left(dst, src, n)   Shift left
 *
 * unsigned int first_node(mask)        Number of the lowest set bit, or MAX_NUMNODES
 * unsigned int next_node(node, mask)   Next node past 'node', or MAX_NUMNODES
 * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
 *                                      or MAX_NUMNODES
 * unsigned int first_unset_node(mask)  First node not set in mask, or
 *                                      MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)    Return nodemask with bit 'node' set
 * NODE_MASK_ALL                        Initializer - all bits set
 * NODE_MASK_NONE                       Initializer - no bits set
 * unsigned long *nodes_addr(mask)      Array of unsigned longs in mask
 *
 * int nodemask_parse_user(ubuf, ulen, mask)    Parse ascii string as nodemask
 * int nodelist_parse(buf, map)         Parse ascii string as nodelist
 * int node_remap(oldbit, old, new)     newbit = map(old, new)(oldbit)
 * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
 * void nodes_onto(dst, orig, relmap)   *dst = orig relative to relmap
 * void nodes_fold(dst, orig, sz)       dst bits = orig bits mod sz
 *
 * for_each_node_mask(node, mask)       for-loop node over mask
 *
 * int num_online_nodes()               Number of online Nodes
 * int num_possible_nodes()             Number of all possible Nodes
 *
 * int node_random(mask)                Random node with set bit in mask
 *
 * int node_online(node)                Is node 'node' online?
 * int node_possible(node)              Is node 'node' possible?
 *
 * node_set_online(node)                set bit 'node' in node_online_map
 * node_set_offline(node)               clear bit 'node' in node_online_map
 *
 * for_each_node(node)                  for-loop node over node_possible_map
 * for_each_online_node(node)           for-loop node over node_online_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  So use a simple one-line #define
 *    for node_isset(), instead of wrapping an inline inside a macro, the
 *    way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing the above logical AND, OR, XOR and remap operations, callers
 * tend to need temporary nodemask_t's on the stack.  But if NODES_SHIFT is
 * large, nodemask_t's consume too much stack space.  NODEMASK_SCRATCH is a
 * helper for such situations.  See below and CPUMASK_ALLOC also.
 */
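
/*
 * A minimal illustrative sketch of the basic operations above; the mask
 * and variable names ('my_nodes', 'nid') are hypothetical, not part of
 * this header:
 *
 *	nodemask_t my_nodes = NODE_MASK_NONE;
 *	int nid;
 *
 *	node_set(0, my_nodes);
 *	node_set(2, my_nodes);
 *	if (node_isset(2, my_nodes))
 *		...
 *	for_each_node_mask(nid, my_nodes)
 *		...
 */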

#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/nodemask_types.h>
#include <linux/random.h>

extern nodemask_t _unused_nodemask_arg_;

/**
 * nodemask_pr_args - printf args to output a nodemask
 * @maskp: nodemask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
 */
#define nodemask_pr_args(maskp)	__nodemask_pr_numnodes(maskp), \
				__nodemask_pr_bits(maskp)
static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
	return m ? MAX_NUMNODES : 0;
}
static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
	return m ? m->bits : NULL;
}
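
/*
 * For example, a nodemask can be printed in list format (such as "0-3,5")
 * with the '%*pbl' specifier; 'mask' here is a hypothetical nodemask_t:
 *
 *	pr_info("nodes: %*pbl\n", nodemask_pr_args(&mask));
 */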

/*
 * The inline keyword gives the compiler room to decide to inline, or
 * not inline a function as it sees best.  However, as these functions
 * are called in both __init and non-__init functions, if they are not
 * inlined we will end up with a section mismatch error (of the type of
 * freeable items not being freed).  So we must use __always_inline here
 * to fix the problem.  If other functions in the future also end up in
 * this situation they will also need to be annotated as __always_inline.
 */
#define node_set(node, dst) __node_set((node), &(dst))
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}

#define node_clear(node, dst) __node_clear((node), &(dst))
static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
	clear_bit(node, dstp->bits);
}

#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)

#define node_test_and_set(node, nodemask) \
			__node_test_and_set((node), &(nodemask))
static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
	return test_and_set_bit(node, addr->bits);
}

#define nodes_and(dst, src1, src2) \
			__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_or(dst, src1, src2) \
			__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_xor(dst, src1, src2) \
			__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_andnot(dst, src1, src2) \
			__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES)
static __always_inline void __nodes_copy(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_copy(dstp->bits, srcp->bits, nbits);
}

#define nodes_complement(dst, src) \
			__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static __always_inline void __nodes_complement(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define nodes_equal(src1, src2) \
			__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_equal(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define nodes_intersects(src1, src2) \
			__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define nodes_subset(src1, src2) \
			__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_subset(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_full(srcp->bits, nbits);
}

#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}
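
/*
 * Illustrative sketch of the set operations above; 'a', 'b' and 'both'
 * are hypothetical masks:
 *
 *	nodemask_t both;
 *
 *	nodes_and(both, a, b);
 *	if (!nodes_empty(both))
 *		pr_debug("%d node(s) in common\n", nodes_weight(both));
 */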

#define nodes_shift_right(dst, src, n) \
			__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_right(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define nodes_shift_left(dst, src, n) \
			__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_left(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

/*
 * FIXME: better would be to fix all architectures to never return
 * > MAX_NUMNODES, then the silly min_ts could be dropped.
 */

#define first_node(src) __first_node(&(src))
static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
	return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}

#define next_node(n, src) __next_node((n), &(src))
static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
	return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}

/*
 * Find the next present node in src, starting after node n, wrapping around to
 * the first node in src if needed.  Returns MAX_NUMNODES if src is empty.
 */
#define next_node_in(n, src) __next_node_in((n), &(src))
static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
	unsigned int ret = __next_node(node, srcp);

	if (ret == MAX_NUMNODES)
		ret = __first_node(srcp);
	return ret;
}

static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
	nodes_clear(*mask);
	node_set(node, *mask);
}

#define nodemask_of_node(node)						\
({									\
	typeof(_unused_nodemask_arg_) m;				\
	if (sizeof(m) == sizeof(unsigned long)) {			\
		m.bits[0] = 1UL << (node);				\
	} else {							\
		init_nodemask_of_node(&m, (node));			\
	}								\
	m;								\
})

#define first_unset_node(mask) __first_unset_node(&(mask))
static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
	return min_t(unsigned int, MAX_NUMNODES,
			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}

#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)

#if MAX_NUMNODES <= BITS_PER_LONG

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#else

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL,			\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#endif

#define NODE_MASK_NONE							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL			\
} })
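
/*
 * Illustrative sketch: NODE_MASK_NONE and NODE_MASK_ALL are initializers,
 * so they also suit static masks ('my_nodes' and 'nid' are hypothetical):
 *
 *	static nodemask_t my_nodes = NODE_MASK_NONE;
 *
 * A round-robin walk that wraps past the highest set node can use
 * next_node_in():
 *
 *	nid = next_node_in(nid, my_nodes);
 */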

#define nodes_addr(src) ((src).bits)

#define nodemask_parse_user(ubuf, ulen, dst) \
		__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
					nodemask_t *dstp, int nbits)
{
	return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
	return bitmap_parselist(buf, dstp->bits, nbits);
}

#define node_remap(oldbit, old, new) \
		__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
static __always_inline int __node_remap(int oldbit,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define nodes_remap(dst, src, old, new) \
		__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define nodes_onto(dst, orig, relmap) \
		__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
		const nodemask_t *relmapp, int nbits)
{
	bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define nodes_fold(dst, orig, sz) \
		__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
		int sz, int nbits)
{
	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}

#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask)				\
	for ((node) = first_node(mask);				\
	     (node) < MAX_NUMNODES;				\
	     (node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask)				\
	for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
#endif /* MAX_NUMNODES */
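
/*
 * Illustrative sketch: parse a nodelist string such as "0-3,5" and walk
 * the resulting mask ('mask' and 'nid' are hypothetical):
 *
 *	nodemask_t mask;
 *	int nid;
 *
 *	if (nodelist_parse("0-3,5", mask) == 0)
 *		for_each_node_mask(nid, mask)
 *			pr_debug("node %d\n", nid);
 */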

/*
 * Bitmasks that are kept for all the nodes.
 */
enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
	N_MEMORY,		/* The node has memory(regular, high, movable) */
	N_CPU,			/* The node has one or more cpus */
	N_GENERIC_INITIATOR,	/* The node has one or more Generic Initiators */
	NR_NODE_STATES
};

/*
 * The following particular system nodemasks and operations
 * on them manage all possible and online nodes.
 */

extern nodemask_t node_states[NR_NODE_STATES];

#if MAX_NUMNODES > 1
static __always_inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

static __always_inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

static __always_inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

static __always_inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])
static __always_inline unsigned int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}
static __always_inline unsigned int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;

static __always_inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

static __always_inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else

static __always_inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static __always_inline void node_set_state(int node, enum node_states state)
{
}

static __always_inline void node_clear_state(int node, enum node_states state)
{
}

static __always_inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ( (node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define next_memory_node(nid)	(MAX_NUMNODES)
#define nr_node_ids		1U
#define nr_online_nodes		1U

#define node_set_online(node)	node_set_state((node), N_ONLINE)
#define node_set_offline(node)	node_clear_state((node), N_ONLINE)

#endif

static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
	int w, bit;

	w = nodes_weight(*maskp);
	switch (w) {
	case 0:
		bit = NUMA_NO_NODE;
		break;
	case 1:
		bit = first_node(*maskp);
		break;
	default:
		bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
		break;
	}
	return bit;
#else
	return 0;
#endif
}

#define node_online_map		node_states[N_ONLINE]
#define node_possible_map	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
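
/*
 * Illustrative sketch: pick a random online node, falling back to the
 * local node when the mask is empty (assumes the caller has
 * numa_node_id() available via linux/topology.h):
 *
 *	int nid = node_random(&node_online_map);
 *
 *	if (nid == NUMA_NO_NODE)
 *		nid = numa_node_id();
 */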
549 */ 550 #if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */ 551 #define NODEMASK_ALLOC(type, name, gfp_flags) \ 552 type *name = kmalloc(sizeof(*name), gfp_flags) 553 #define NODEMASK_FREE(m) kfree(m) 554 #else 555 #define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name 556 #define NODEMASK_FREE(m) do {} while (0) 557 #endif 558 559 /* Example structure for using NODEMASK_ALLOC, used in mempolicy. */ 560 struct nodemask_scratch { 561 nodemask_t mask1; 562 nodemask_t mask2; 563 }; 564 565 #define NODEMASK_SCRATCH(x) \ 566 NODEMASK_ALLOC(struct nodemask_scratch, x, \ 567 GFP_KERNEL | __GFP_NORETRY) 568 #define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) 569 570 571 #endif /* __LINUX_NODEMASK_H */ 572