/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number. In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>

/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/**
 * cpumask_bits - get the bits in a cpumask
 * @maskp: the struct cpumask *
 *
 * You should only assume nr_cpu_ids bits of this mask are valid. This is
 * a macro so it's const-correct.
 */
#define cpumask_bits(maskp) ((maskp)->bits)

/**
 * cpumask_pr_args - printf args to output a cpumask
 * @maskp: cpumask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
 */
#define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)

#if NR_CPUS == 1
#define nr_cpu_ids		1U
#else
extern unsigned int nr_cpu_ids;
#endif

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits	nr_cpu_ids
#else
#define nr_cpumask_bits	((unsigned int)NR_CPUS)
#endif

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
 *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
 *  that it is possible might ever be plugged in at any time during the
 *  life of that system boot. The cpu_present_mask is dynamic(*),
 *  representing which CPUs are currently plugged in. And
 *  cpu_online_mask is the dynamic subset of cpu_present_mask,
 *  indicating those CPUs available for scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_possible_mask is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
 *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_mask is just a copy of cpu_possible_mask.
 *
 *  (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
 *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arches (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
 *    assumption that their single CPU is online. The UP
 *    cpu_{online,possible,present}_masks are placebos. Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case. This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 */
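/*
 * For example, cpumask_pr_args() supplies both arguments that the
 * '%*pb'/'%*pbl' printk formats expect (a sketch; any initialized
 * cpumask pointer works in place of cpu_online_mask):
 *
 *	pr_info("online: %*pbl\n", cpumask_pr_args(cpu_online_mask));
 */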
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask    ((const struct cpumask *)&__cpu_dying_mask)

extern atomic_t __num_online_cpus;

extern cpumask_t cpus_booted_once_mask;

static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}

/* verify cpu argument to cpumask_* operators */
static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
	cpu_max_bits_warn(cpu, nr_cpumask_bits);
	return cpu;
}

/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_first_zero - get the first unset cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if all cpus are set.
 */
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
	return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @srcp1: the first input
 * @srcp2: the second input
 *
 * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
 */
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
	return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
}

/**
 * cpumask_last - get the last CPU in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpumask_bits if no CPUs set.
 */
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
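/*
 * For example, callers should compare these iterators against
 * nr_cpu_ids rather than against NR_CPUS (a sketch; 'mask' stands for
 * any hypothetical initialized cpumask pointer):
 *
 *	unsigned int cpu = cpumask_first(mask);
 *
 *	if (cpu >= nr_cpu_ids)
 *		return -ENODEV;
 */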
/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}

#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}

static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
						      const struct cpumask *src2p)
{
	return cpumask_first_and(src1p, src2p);
}

static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	return cpumask_first(srcp);
}
#else
unsigned int cpumask_local_spread(unsigned int i, int node);
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p);
unsigned int cpumask_any_distribute(const struct cpumask *srcp);
#endif /* NR_CPUS */

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
				 nr_cpumask_bits, n + 1);
}

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;				\
		(cpu) = cpumask_next((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

/**
 * for_each_cpu_not - iterate over every cpu in a complemented mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_not(cpu, mask)				\
	for ((cpu) = -1;					\
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)					\
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
	     (cpu) < nr_cpumask_bits;						\
	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
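/*
 * For example, starting the walk at a rotating position spreads work
 * across CPUs instead of always hitting the lowest-numbered one (a
 * sketch; 'prev' and try_to_queue_on() are hypothetical):
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, prev + 1) {
 *		if (try_to_queue_on(cpu))
 *			break;
 *	}
 */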
/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places. It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask1, mask2)				\
	for ((cpu) = -1;						\
		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
		(cpu) < nr_cpu_ids;)

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}

#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] = 1UL						\
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns true if @cpu is set in @cpumask, else returns false
 */
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
 *
 * test_and_set_bit wrapper for cpumasks.
 */
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
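/*
 * For example, the atomic test-and-set form implements one-shot
 * per-cpu work without a separate lock (a sketch; 'tracked_mask' and
 * first_time_init() are hypothetical):
 *
 *	if (!cpumask_test_and_set_cpu(cpu, &tracked_mask))
 *		first_time_init(cpu);
 */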
/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
 *
 * test_and_clear_bit wrapper for cpumasks.
 */
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_setall(struct cpumask *dstp)
{
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns false, else returns true
 */
static inline bool cpumask_and(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
			  cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
		  cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_xor(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
		   cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns false, else returns true
 */
static inline bool cpumask_andnot(struct cpumask *dstp,
				  const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
			     cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_complement - *dstp = ~*srcp
 * @dstp: the cpumask result
 * @srcp: the input to invert
 */
static inline void cpumask_complement(struct cpumask *dstp,
				      const struct cpumask *srcp)
{
	bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
			  nr_cpumask_bits);
}

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_equal(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			    nr_cpumask_bits);
}

/**
 * cpumask_or_equal - (*src1p | *src2p) == *src3p
 * @src1p: the first input
 * @src2p: the second input
 * @src3p: the third input
 */
static inline bool cpumask_or_equal(const struct cpumask *src1p,
				    const struct cpumask *src2p,
				    const struct cpumask *src3p)
{
	return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			       cpumask_bits(src3p), nr_cpumask_bits);
}
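/*
 * For example, the boolean result of cpumask_and() computes an
 * intersection and tests it for emptiness in one step (a sketch;
 * 'new_mask' and 'requested' are hypothetical):
 *
 *	if (!cpumask_and(new_mask, requested, cpu_online_mask))
 *		return -EINVAL;
 */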
/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_intersects(const struct cpumask *src1p,
				      const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
				 nr_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns true if *@src1p is a subset of *@src2p, else returns false
 */
static inline bool cpumask_subset(const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
			     nr_cpumask_bits);
}

/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask to test; true if all cpus < nr_cpu_ids are clear.
 */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to test; true if all cpus < nr_cpu_ids are set.
 */
static inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_right(struct cpumask *dstp,
				       const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
			   nr_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_left(struct cpumask *dstp,
				      const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
			  nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(struct cpumask *dstp,
				const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (< nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))
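/*
 * For example, cpumask_of() combines with the predicates above without
 * any temporary storage (a sketch; the subset test below is just a
 * long-hand spelling of cpu_online(cpu)):
 *
 *	if (cpumask_subset(cpumask_of(cpu), cpu_online_mask))
 *		...
 */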
/**
 * cpumask_parse_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_parselist_user - extract a cpumask from a user string of ranges
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parselist_user(const char __user *buf, int len,
					 struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
				     nr_cpumask_bits);
}

/**
 * cpumask_parse - extract a cpumask from a string
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpulist_parse - extract a cpumask from a user string of ranges
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
 */
static inline unsigned int cpumask_size(void)
{
	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
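/*
 * For example, cpumask_size() (rather than sizeof(struct cpumask)) is
 * the right amount to allocate for a bare mask, since only
 * nr_cpumask_bits bits need backing store (a sketch; a GFP_KERNEL
 * allocation context is assumed):
 *
 *	struct cpumask *mask = kmalloc(cpumask_size(), GFP_KERNEL);
 */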
/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play! In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a noop, so it still works.
 *
 * ie.
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	  ... use 'tmpmask' like a normal struct cpumask * ...
 *
 *	free_cpumask_var(tmpmask);
 *
 *
 * However, there is one notable exception. alloc_cpumask_var() allocates
 * only nr_cpumask_bits bits (whereas a real cpumask_t always has NR_CPUS
 * bits). Therefore you must not dereference a cpumask_var_t to copy it
 * by value:
 *
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	var = *tmpmask;
 *
 * This code performs an NR_CPUS-length memcpy and can corrupt memory.
 * cpumask_copy() provides safe copy functionality instead.
 *
 * Note that there is another evil here: If you define a cpumask_var_t
 * as a percpu variable then the way to obtain the address of the cpumask
 * structure differs between configurations, which influences which
 * this_cpu_* operation needs to be used. Please use
 * this_cpu_cpumask_var_ptr() in those cases. The direct use of
 * this_cpu_ptr() or this_cpu_read() will lead to failures when the
 * other type of cpumask_var_t implementation is configured.
 *
 * Please also note that __cpumask_var_read_mostly can be used to declare
 * a cpumask_var_t variable itself (not its content) as read mostly.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
#define __cpumask_var_read_mostly	__read_mostly

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);

static inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}

static inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}

void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}

#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x)	this_cpu_ptr(x)
#define __cpumask_var_read_mostly

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					   int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)
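/*
 * For example, cpumask_available() is the portable "has this been
 * allocated yet?" test, since a cpumask_var_t is a pointer only in the
 * offstack configuration (a sketch; 'saved_mask' is a hypothetical
 * cpumask_var_t that may not have been allocated yet):
 *
 *	if (!cpumask_available(saved_mask))
 *		return;
 *	cpumask_copy(saved_mask, cpu_online_mask);
 */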
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#if NR_CPUS == 1
/* Uniprocessor: the possible/online/present masks are always "1" */
#define for_each_possible_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_present_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
#endif

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

static inline void reset_cpu_possible_mask(void)
{
	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
}

static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, &__cpu_possible_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}

static inline void
set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, &__cpu_present_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_present_mask);
}

void set_cpu_online(unsigned int cpu, bool online);

static inline void
set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, &__cpu_active_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_active_mask);
}

static inline void
set_cpu_dying(unsigned int cpu, bool dying)
{
	if (dying)
		cpumask_set_cpu(cpu, &__cpu_dying_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_dying_mask);
}

/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
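/*
 * A worked example of the offset trick above (assuming 64-bit longs):
 * for cpu 70, get_cpu_mask() picks row 1 + (70 % 64) = 7, whose first
 * word has bit 6 set, then steps the pointer back by 70 / 64 = 1 word.
 * That word is now word 1 of the returned mask, i.e. bit 64 + 6 = 70,
 * while the surrounding padding words are zero. Row 0 stays all-zero,
 * which is what makes cpu_none_mask above work.
 */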
#if NR_CPUS > 1
/**
 * num_online_cpus() - Read the number of online CPUs
 *
 * Despite the fact that __num_online_cpus is of type atomic_t, this
 * interface gives only a momentary snapshot and is not protected against
 * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
 * region.
 */
static inline unsigned int num_online_cpus(void)
{
	return atomic_read(&__num_online_cpus);
}
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)

static inline bool cpu_online(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_online_mask);
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_possible_mask);
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_present_mask);
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_active_mask);
}

static inline bool cpu_dying(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_dying_mask);
}

#else

#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U

static inline bool cpu_online(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_dying(unsigned int cpu)
{
	return false;
}

#endif /* NR_CPUS > 1 */

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL							\
{									\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL							\
{									\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif /* NR_CPUS > BITS_PER_LONG */

/**
 * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
 *	as comma-separated list of cpus or hex values of cpumask
 * @list: indicates whether the cpumap must be list
 * @mask: the cpumask to copy
 * @buf: the buffer to copy into
 *
 * Returns the length of the (null-terminated) @buf string, zero if
 * nothing is copied.
 */
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				       nr_cpu_ids);
}
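/*
 * For example, this is the usual one-liner in a sysfs show() method
 * (a sketch; the foo_cpus attribute and its wiring are hypothetical):
 *
 *	static ssize_t foo_cpus_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		return cpumap_print_to_pagebuf(true, buf, cpu_online_mask);
 *	}
 */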
/**
 * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
 *	hex values of cpumask
 *
 * @buf: the buffer to copy into
 * @mask: the cpumask to copy
 * @off: offset into the string from which we are copying; we copy to @buf
 * @count: the maximum number of bytes to print
 *
 * The function prints the cpumask into the buffer as hex values of
 * cpumask; typically used by bin_attribute to export the cpumask bitmask
 * ABI.
 *
 * Returns the length of how many bytes have been copied, excluding
 * terminating '\0'.
 */
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
			    loff_t off, size_t count)
{
	return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
					   nr_cpu_ids, off, count) - 1;
}

/**
 * cpumap_print_list_to_buf - copies the cpumask into the buffer as
 *	comma-separated list of cpus
 *
 * Everything is the same as with the above cpumap_print_bitmask_to_buf()
 * except the print format.
 */
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
			 loff_t off, size_t count)
{
	return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
					nr_cpu_ids, off, count) - 1;
}

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif /* NR_CPUS > BITS_PER_LONG */

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] = 1UL							\
} }

/*
 * Provide a valid theoretical max size for cpumap and cpulist sysfs files
 * to avoid breaking userspace which may allocate a buffer based on the size
 * reported by e.g. fstat.
 *
 * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
 *
 * For cpulist, 7 is (ceil(log10(NR_CPUS)) + 1), allowing for NR_CPUS to be
 * up to 2 orders of magnitude larger than 8192. We then divide by 2 to
 * cover a worst-case of every other cpu being on one of two nodes for a
 * very large NR_CPUS.
 *
 * Use PAGE_SIZE as a minimum for smaller configurations.
 */
#define CPUMAP_FILE_MAX_BYTES  ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
					? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES  (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)

#endif /* __LINUX_CPUMASK_H */