/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number. In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>

/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/**
 * cpumask_bits - get the bits in a cpumask
 * @maskp: the struct cpumask *
 *
 * You should only assume nr_cpu_ids bits of this mask are valid. This is
 * a macro so it's const-correct.
 */
#define cpumask_bits(maskp) ((maskp)->bits)

/**
 * cpumask_pr_args - printf args to output a cpumask
 * @maskp: cpumask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
 */
#define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)
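/*
 * For instance, a mask can be logged either as hex words or as a cpu list
 * (a minimal sketch; cpu_online_mask and pr_info() are the only kernel
 * symbols assumed here):
 *
 *	pr_info("online: %*pb\n", cpumask_pr_args(cpu_online_mask));
 *	pr_info("online: %*pbl\n", cpumask_pr_args(cpu_online_mask));
 *
 * '%*pb' prints hex words (e.g. "f" for cpus 0-3), while '%*pbl' prints
 * a range list such as "0-3".
 */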
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif

static inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
	WARN_ON(nr != nr_cpu_ids);
#else
	nr_cpu_ids = nr;
#endif
}

/*
 * We have several different "preferred sizes" for the cpumask
 * operations, depending on operation.
 *
 * For example, the bitmap scanning and operating operations have
 * optimized routines that work for the single-word case, but only when
 * the size is constant. So if NR_CPUS fits in one single word, we are
 * better off using that small constant, in order to trigger the
 * optimized bit finding. That is 'small_cpumask_bits'.
 *
 * The clearing and copying operations will similarly perform better
 * with a constant size, but we limit that size arbitrarily to four
 * words. We call this 'large_cpumask_bits'.
 *
 * Finally, some operations just want the exact limit, either because
 * they set bits or just don't have any faster fixed-sized versions. We
 * call this just 'nr_cpumask_bits'.
 *
 * Note that these optional constants are always guaranteed to be at
 * least as big as 'nr_cpu_ids' itself is, and all our cpumask
 * allocations are at least that size (see cpumask_size()). The
 * optimization comes from being able to potentially use a compile-time
 * constant instead of a run-time generated exact number of CPUs.
 */
#if NR_CPUS <= BITS_PER_LONG
#define small_cpumask_bits ((unsigned int)NR_CPUS)
#define large_cpumask_bits ((unsigned int)NR_CPUS)
#elif NR_CPUS <= 4*BITS_PER_LONG
#define small_cpumask_bits nr_cpu_ids
#define large_cpumask_bits ((unsigned int)NR_CPUS)
#else
#define small_cpumask_bits nr_cpu_ids
#define large_cpumask_bits nr_cpu_ids
#endif
#define nr_cpumask_bits nr_cpu_ids

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask  - has bit 'cpu' set iff cpu is populated
 *     cpu_enabled_mask  - has bit 'cpu' set iff cpu can be brought online
 *     cpu_online_mask   - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask   - has bit 'cpu' set iff cpu available to migration
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
 *  that might ever be plugged in at any time during the life of that
 *  system boot. The cpu_present_mask is dynamic(*), representing which
 *  CPUs are currently plugged in. And cpu_online_mask is the dynamic
 *  subset of cpu_present_mask, indicating those CPUs available for
 *  scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_mask is just a copy of cpu_possible_mask.
 *
 *  (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
 *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    assumption that their single CPU is online. The UP
 *    cpu_{online,possible,present}_masks are placebos. Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case. This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 */

extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_enabled_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_enabled_mask  ((const struct cpumask *)&__cpu_enabled_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask    ((const struct cpumask *)&__cpu_dying_mask)

extern atomic_t __num_online_cpus;

extern cpumask_t cpus_booted_once_mask;

static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}

/* verify cpu argument to cpumask_* operators */
static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
	cpu_max_bits_warn(cpu, small_cpumask_bits);
	return cpu;
}

/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
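/*
 * The usual "is anything set?" pattern checks the return value against
 * nr_cpu_ids (a minimal sketch; the error code is illustrative):
 *
 *	unsigned int cpu = cpumask_first(cpu_online_mask);
 *
 *	if (cpu >= nr_cpu_ids)
 *		return -ENODEV;
 */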
/**
 * cpumask_first_zero - get the first unset cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if all cpus are set.
 */
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
	return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @srcp1: the first input
 * @srcp2: the second input
 *
 * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
 */
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
	return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
 * @srcp1: the first input
 * @srcp2: the second input
 * @srcp3: the third input
 *
 * Return: >= nr_cpu_ids if no cpus set in all.
 */
static inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
				   const struct cpumask *srcp2,
				   const struct cpumask *srcp3)
{
	return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				      cpumask_bits(srcp3), small_cpumask_bits);
}

/**
 * cpumask_last - get the last CPU in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no CPUs set.
 */
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus set.
 */
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}
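/*
 * Passing -1 as @n is the documented way to start the search from cpu 0,
 * so an open-coded equivalent of the for_each_cpu() iterator below looks
 * like this (a sketch; do_something() is hypothetical):
 *
 *	for (cpu = cpumask_next(-1, mask);
 *	     cpu < nr_cpu_ids;
 *	     cpu = cpumask_next(cpu, mask))
 *		do_something(cpu);
 */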
#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}

static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
						      const struct cpumask *src2p)
{
	return cpumask_first_and(src1p, src2p);
}

static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	return cpumask_first(srcp);
}
#else
unsigned int cpumask_local_spread(unsigned int i, int node);
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p);
unsigned int cpumask_any_distribute(const struct cpumask *srcp);
#endif /* NR_CPUS */

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus set in both.
 */
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
				 small_cpumask_bits, n + 1);
}

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)

#if NR_CPUS == 1
static inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	cpumask_check(start);
	if (n != -1)
		cpumask_check(n);

	/*
	 * Return the first available CPU when wrapping, or when starting before cpu0,
	 * since there is only one valid option.
	 */
	if (wrap && n >= 0)
		return nr_cpumask_bits;

	return cpumask_first(mask);
}
#else
unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
#endif

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)				\
	for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places. It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask1, mask2)				\
	for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
 *			 those present in another.
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places. It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_andnot(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_andnot(cpu, mask1, mask2)				\
	for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
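/*
 * Example: walking the CPUs that are both in a caller-provided affinity
 * mask and currently online, without a temporary mask (a sketch;
 * 'affinity' and consider() are hypothetical):
 *
 *	for_each_cpu_and(cpu, affinity, cpu_online_mask)
 *		consider(cpu);
 */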
/**
 * for_each_cpu_or - iterate over every cpu present in either mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places. It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_or(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_or(cpu, mask1, mask2)				\
	for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_from(cpu, mask)				\
	for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Return: >= nr_cpu_ids if no cpus set.
 */
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}

/**
 * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 * @cpu: the cpu to ignore
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
static inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
				 const struct cpumask *mask2,
				 unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	i = cpumask_first_and(mask1, mask2);
	if (i != cpu)
		return i;

	return cpumask_next_and(cpu, mask1, mask2);
}

/**
 * cpumask_nth - get the Nth cpu in a cpumask
 * @srcp: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
	return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_and - get the Nth cpu in 2 cpumasks
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
			     const struct cpumask *srcp2)
{
	return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
				const struct cpumask *srcp2)
{
	return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				   small_cpumask_bits, cpumask_check(cpu));
}
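/*
 * The cpumask_nth*() helpers are handy for spreading work, e.g. picking
 * the i-th online CPU for queue i (a sketch; 'i' is a hypothetical queue
 * index, and the snapshot nature of num_online_cpus() still applies):
 *
 *	cpu = cpumask_nth(i % num_online_cpus(), cpu_online_mask);
 */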
/**
 * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @srcp3: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static __always_inline
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
				    const struct cpumask *srcp2,
				    const struct cpumask *srcp3)
{
	return find_nth_and_andnot_bit(cpumask_bits(srcp1),
				       cpumask_bits(srcp2),
				       cpumask_bits(srcp3),
				       small_cpumask_bits, cpumask_check(cpu));
}

#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] = 1UL						\
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_assign_cpu - assign a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 * @value: the value to assign
 */
static __always_inline void cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
	assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}

static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
	__assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Return: true if @cpu is set in @cpumask, else returns false
 */
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * test_and_set_bit wrapper for cpumasks.
 *
 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
 */
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
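/*
 * Because the test_and_* variants are atomic, they can arbitrate between
 * CPUs; e.g. letting exactly one caller per CPU claim a one-shot duty
 * (a sketch; 'claimed_mask' and do_once() are hypothetical):
 *
 *	if (!cpumask_test_and_set_cpu(smp_processor_id(), &claimed_mask))
 *		do_once();
 */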
/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * test_and_clear_bit wrapper for cpumasks.
 *
 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
 */
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_setall(struct cpumask *dstp)
{
	if (small_const_nbits(small_cpumask_bits)) {
		cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
		return;
	}
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: false if *@dstp is empty, else returns true
 */
static inline bool cpumask_and(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
			  cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
		  cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_xor(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
		   cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: false if *@dstp is empty, else returns true
 */
static inline bool cpumask_andnot(struct cpumask *dstp,
				  const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
			     cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if the cpumasks are equal, false if not
 */
static inline bool cpumask_equal(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			    small_cpumask_bits);
}

/**
 * cpumask_or_equal - *src1p | *src2p == *src3p
 * @src1p: the first input
 * @src2p: the second input
 * @src3p: the third input
 *
 * Return: true if first cpumask ORed with second cpumask == third cpumask,
 *	   otherwise false
 */
static inline bool cpumask_or_equal(const struct cpumask *src1p,
				    const struct cpumask *src2p,
				    const struct cpumask *src3p)
{
	return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			       cpumask_bits(src3p), small_cpumask_bits);
}
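/*
 * Since cpumask_and() and cpumask_andnot() report whether the result is
 * non-empty, the common "intersect, then bail if empty" dance needs no
 * separate emptiness test (a sketch; 'tmp', 'a' and 'b' are hypothetical
 * masks, and the error code is illustrative):
 *
 *	if (!cpumask_and(&tmp, a, b))
 *		return -EINVAL;
 */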
/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if first cpumask ANDed with second cpumask is non-empty,
 *	   otherwise false
 */
static inline bool cpumask_intersects(const struct cpumask *src1p,
				      const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
				 small_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if *@src1p is a subset of *@src2p, else returns false
 */
static inline bool cpumask_subset(const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
			     small_cpumask_bits);
}
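/*
 * A typical validation pattern, e.g. rejecting a user-supplied affinity
 * mask that names no usable CPU (a sketch; 'new_mask' is hypothetical):
 *
 *	if (!cpumask_intersects(new_mask, cpu_online_mask))
 *		return -EINVAL;
 */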
/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask to check (only cpus < nr_cpu_ids are considered).
 *
 * Return: true if srcp is empty (has no bits set), else false
 */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to check (only cpus < nr_cpu_ids are considered).
 *
 * Return: true if srcp is full (has all bits set), else false
 */
static inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in *srcp
 */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
 * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
 * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in both *srcp1 and *srcp2
 */
static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
					      const struct cpumask *srcp2)
{
	return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
 * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
 * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in *srcp1 and cleared in *srcp2
 */
static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
						 const struct cpumask *srcp2)
{
	return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_right(struct cpumask *dstp,
				       const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
			   small_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_left(struct cpumask *dstp,
				      const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
			  nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(struct cpumask *dstp,
				const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}

/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (< nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))

/**
 * cpumask_parse_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_parselist_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpumask_parselist_user(const char __user *buf, int len,
					 struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
				     nr_cpumask_bits);
}

/**
 * cpumask_parse - extract a cpumask from a string
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
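/*
 * Sketch of a sysfs-style store handler using cpumask_parse() above or
 * cpulist_parse() below ('buf' is the user-provided string, 'mask' a
 * hypothetical destination):
 *
 *	err = cpumask_parse(buf, mask);		("f" sets cpus 0-3)
 *	err = cpulist_parse(buf, mask);		("0-3,8" sets cpus 0-3 and 8)
 *	if (err)
 *		return err;
 */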
/**
 * cpulist_parse - extract a cpumask from a user string of ranges
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
 *
 * Return: size to allocate for a &struct cpumask in bytes
 */
static inline unsigned int cpumask_size(void)
{
	return bitmap_size(large_cpumask_bits);
}

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play! In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a noop, so it still works.
 *
 * i.e.
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	... use 'tmpmask' like a normal struct cpumask * ...
 *
 *	free_cpumask_var(tmpmask);
 *
 *
 * However, there is one notable exception: alloc_cpumask_var() allocates
 * only nr_cpumask_bits bits (whereas a real cpumask_t always has NR_CPUS
 * bits). Therefore you must not dereference a cpumask_var_t:
 *
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	var = *tmpmask;
 *
 * This code performs a memcpy of NR_CPUS length and can corrupt memory.
 * cpumask_copy() provides safe copy functionality.
 *
 * Note that there is another evil here: if you define a cpumask_var_t
 * as a percpu variable, then the way to obtain the address of the cpumask
 * structure differs between the two implementations, and with it the
 * this_cpu_* operation that needs to be used. Please use
 * this_cpu_cpumask_var_t in those cases. The direct use of this_cpu_ptr()
 * or this_cpu_read() will lead to failures when the other type of
 * cpumask_var_t implementation is configured.
 *
 * Please also note that __cpumask_var_read_mostly can be used to declare
 * a cpumask_var_t variable itself (not its content) as read mostly.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
#define __cpumask_var_read_mostly	__read_mostly

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);

static inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 *
 * Return: %true if allocation succeeded, %false if not
 */
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}

static inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}

void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}

#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					   int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
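/*
 * With the cleanup helper above, an offstack cpumask can be released
 * automatically on scope exit. A sketch, valid as written only when
 * CONFIG_CPUMASK_OFFSTACK=y (so cpumask_var_t is a struct cpumask *):
 *
 *	struct cpumask *tmp __free(free_cpumask_var) = NULL;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	(no explicit free_cpumask_var() is then needed on any return path)
 */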
/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#if NR_CPUS == 1
/* Uniprocessor: the possible/online/present masks are always "1" */
#define for_each_possible_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_present_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
#define for_each_enabled_cpu(cpu)  for_each_cpu((cpu), cpu_enabled_mask)
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
#endif

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, &__cpu_possible_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}

static inline void
set_cpu_enabled(unsigned int cpu, bool can_be_onlined)
{
	if (can_be_onlined)
		cpumask_set_cpu(cpu, &__cpu_enabled_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_enabled_mask);
}

static inline void
set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, &__cpu_present_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_present_mask);
}

void set_cpu_online(unsigned int cpu, bool online);

static inline void
set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, &__cpu_active_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_active_mask);
}

static inline void
set_cpu_dying(unsigned int cpu, bool dying)
{
	if (dying)
		cpumask_set_cpu(cpu, &__cpu_dying_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_dying_mask);
}

/**
 * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}
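/*
 * to_cpumask() in action (a sketch): a static bitmap, usable before any
 * allocator is up, handed to the cpumask API ('early_bits' is
 * hypothetical):
 *
 *	static DECLARE_BITMAP(early_bits, NR_CPUS);
 *
 *	cpumask_set_cpu(0, to_cpumask(early_bits));
 */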
/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}

#if NR_CPUS > 1
/**
 * num_online_cpus() - Read the number of online CPUs
 *
 * Despite the fact that __num_online_cpus is of type atomic_t, this
 * interface gives only a momentary snapshot and is not protected against
 * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
 * region.
 *
 * Return: momentary snapshot of the number of online CPUs
 */
static __always_inline unsigned int num_online_cpus(void)
{
	return raw_atomic_read(&__num_online_cpus);
}
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_enabled_cpus()	cpumask_weight(cpu_enabled_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)

static inline bool cpu_online(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_online_mask);
}

static inline bool cpu_enabled(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_enabled_mask);
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_possible_mask);
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_present_mask);
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_active_mask);
}

static inline bool cpu_dying(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_dying_mask);
}

#else

#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_enabled_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U

static inline bool cpu_online(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_enabled(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_dying(unsigned int cpu)
{
	return false;
}

#endif /* NR_CPUS > 1 */

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL							\
{									\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL							\
{									\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif /* NR_CPUS > BITS_PER_LONG */
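/*
 * As noted above, num_*_cpus() and cpu_online() are only snapshots. Code
 * that must not race with hotplug should pin the masks first (a sketch;
 * cpus_read_lock()/cpus_read_unlock() come from <linux/cpu.h>, and the
 * queue_work_on() call is illustrative):
 *
 *	cpus_read_lock();
 *	if (cpu_online(cpu))
 *		queue_work_on(cpu, wq, work);
 *	cpus_read_unlock();
 */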
/**
 * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
 *	as comma-separated list of cpus or hex values of cpumask
 * @list: indicates whether the cpumap must be list
 * @mask: the cpumask to copy
 * @buf: the buffer to copy into
 *
 * Return: the length of the (null-terminated) @buf string, zero if
 * nothing is copied.
 */
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				       nr_cpu_ids);
}

/**
 * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
 *	hex values of cpumask
 * @buf: the buffer to copy into
 * @mask: the cpumask to copy
 * @off: the offset into the printed string from which we copy to @buf
 * @count: the maximum number of bytes to print
 *
 * The function prints the cpumask into the buffer as hex values of
 * cpumask; typically used by bin_attribute to export cpumask bitmask
 * ABI.
 *
 * Return: the number of bytes copied, excluding the terminating '\0'.
 */
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
			    loff_t off, size_t count)
{
	return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
					   nr_cpu_ids, off, count) - 1;
}

/**
 * cpumap_print_list_to_buf - copies the cpumask into the buffer as
 *	comma-separated list of cpus
 * @buf: the buffer to copy into
 * @mask: the cpumask to copy
 * @off: the offset into the printed string from which we copy to @buf
 * @count: the maximum number of bytes to print
 *
 * Everything is the same as with cpumap_print_bitmask_to_buf() above,
 * except the print format.
 *
 * Return: the number of bytes copied, excluding the terminating '\0'.
 */
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
			 loff_t off, size_t count)
{
	return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
					nr_cpu_ids, off, count) - 1;
}

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif /* NR_CPUS > BITS_PER_LONG */

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] = 1UL							\
} }

/*
 * Provide a valid theoretical max size for cpumap and cpulist sysfs files
 * to avoid breaking userspace which may allocate a buffer based on the size
 * reported by e.g. fstat.
 *
 * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
 *
 * For cpulist, 7 is (ceil(log10(NR_CPUS)) + 1) allowing for NR_CPUS to be up
 * to 2 orders of magnitude larger than 8192. And then we divide by 2 to
 * cover a worst-case of every other cpu being on one of two nodes for a
 * very large NR_CPUS.
 *
 * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
 * unsigned comparison to -1.
 */
#define CPUMAP_FILE_MAX_BYTES  (((NR_CPUS * 9)/32 > PAGE_SIZE) \
					? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES  (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)

#endif /* __LINUX_CPUMASK_H */