#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.
 *
 * The new cpumask_ ops take a "struct cpumask *"; the old ones
 * use cpumask_t.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these cpumasks are based.
 *
 * For details of cpumask_scnprintf() and cpumask_parse_user(),
 * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
 * For details of cpulist_scnprintf() and cpulist_parse(), see
 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
 * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c.
 * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
 *
 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
 * Note: The alternate operations with the suffix "_nr" are used
 *       to limit the range of the loop to nr_cpu_ids instead of
 *       NR_CPUS when NR_CPUS > 64 for performance reasons.
 *       If NR_CPUS is <= 64 then most assembler bitmask
 *       operators execute faster with a constant range, so
 *       the operator will continue to use NR_CPUS.
 *
 *       Another consideration is that nr_cpu_ids is initialized
 *       to NR_CPUS and isn't lowered until the possible cpus are
 *       discovered (including any disabled cpus).  So early uses
 *       will span the entire range of NR_CPUS.
 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
 *
 * The obsolescent cpumask operations are:
 *
 * void cpu_set(cpu, mask)              turn on bit 'cpu' in mask
 * void cpu_clear(cpu, mask)            turn off bit 'cpu' in mask
 * void cpus_setall(mask)               set all bits
 * void cpus_clear(mask)                clear all bits
 * int cpu_isset(cpu, mask)             true iff bit 'cpu' set in mask
 * int cpu_test_and_set(cpu, mask)      test and set bit 'cpu' in mask
 *
 * void cpus_and(dst, src1, src2)       dst = src1 & src2  [intersection]
 * void cpus_or(dst, src1, src2)        dst = src1 | src2  [union]
 * void cpus_xor(dst, src1, src2)       dst = src1 ^ src2
 * void cpus_andnot(dst, src1, src2)    dst = src1 & ~src2
 * void cpus_complement(dst, src)       dst = ~src
 *
 * int cpus_equal(mask1, mask2)         Does mask1 == mask2?
 * int cpus_intersects(mask1, mask2)    Do mask1 and mask2 intersect?
 * int cpus_subset(mask1, mask2)        Is mask1 a subset of mask2?
 * int cpus_empty(mask)                 Is mask empty (no bits set)?
 * int cpus_full(mask)                  Is mask full (all bits set)?
 * int cpus_weight(mask)                Hamming weight - number of set bits
 * int cpus_weight_nr(mask)             Same using nr_cpu_ids instead of NR_CPUS
 *
 * void cpus_shift_right(dst, src, n)   Shift right
 * void cpus_shift_left(dst, src, n)    Shift left
 *
 * int first_cpu(mask)                  Number lowest set bit, or NR_CPUS
 * int next_cpu(cpu, mask)              Next cpu past 'cpu', or NR_CPUS
 * int next_cpu_nr(cpu, mask)           Next cpu past 'cpu', or nr_cpu_ids
 *
 * cpumask_t cpumask_of_cpu(cpu)        Return cpumask with bit 'cpu' set
 *                                      (can be used as an lvalue)
 * CPU_MASK_ALL                         Initializer - all bits set
 * CPU_MASK_NONE                        Initializer - no bits set
 * unsigned long *cpus_addr(mask)       Array of unsigned long's in mask
 *
 * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
 * variables, and CPUMASK_PTR provides pointers to each field.
 *
 * The structure should be defined something like this:
 *      struct my_cpumasks {
 *              cpumask_t mask1;
 *              cpumask_t mask2;
 *      };
 *
 * Usage is then:
 *      CPUMASK_ALLOC(my_cpumasks);
 *      CPUMASK_PTR(mask1, my_cpumasks);
 *      CPUMASK_PTR(mask2, my_cpumasks);
 *
 *      --- DO NOT reference cpumask_t pointers until this check ---
 *      if (my_cpumasks == NULL)
 *              "kmalloc failed"...
 *
 * References are now pointers to the cpumask_t variables (*mask1, ...)
 *
 * if NR_CPUS > BITS_PER_LONG
 *   CPUMASK_ALLOC(m)                   Declares and allocates struct m *m =
 *                                          kmalloc(sizeof(*m), GFP_KERNEL)
 *   CPUMASK_FREE(m)                    Macro for kfree(m)
 * else
 *   CPUMASK_ALLOC(m)                   Declares struct m _m, *m = &_m
 *   CPUMASK_FREE(m)                    Nop
 * endif
 *   CPUMASK_PTR(v, m)                  Declares cpumask_t *v = &(m->v)
 * ------------------------------------------------------------------------
 *
 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
 * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
 * int cpulist_parse(buf, map)          Parse ascii string as cpulist
 * int cpu_remap(oldbit, old, new)      newbit = map(old, new)(oldbit)
 * void cpus_remap(dst, src, old, new)  *dst = map(old, new)(src)
 * void cpus_onto(dst, orig, relmap)    *dst = orig relative to relmap
 * void cpus_fold(dst, orig, sz)        dst bits = orig bits mod sz
 *
 * for_each_cpu_mask(cpu, mask)         for-loop cpu over mask using NR_CPUS
 * for_each_cpu_mask_nr(cpu, mask)      for-loop cpu over mask using nr_cpu_ids
 *
 * int num_online_cpus()                Number of online CPUs
 * int num_possible_cpus()              Number of all possible CPUs
 * int num_present_cpus()               Number of present CPUs
 *
 * int cpu_online(cpu)                  Is some cpu online (can schedule)?
 * int cpu_possible(cpu)                Is some cpu possible?
 * int cpu_present(cpu)                 Is some cpu present (populated)?
 *
 * int any_online_cpu(mask)             First online cpu in mask
 *
 * for_each_possible_cpu(cpu)           for-loop cpu over cpu_possible_map
 * for_each_online_cpu(cpu)            for-loop cpu over cpu_online_map
 * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
 *
 * (A short usage sketch of the obsolescent operations follows this comment.)
 *
 * Subtlety:
 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  Note for example the additional
 *    40 lines of assembly code compiling the "for each possible cpu"
 *    loops buried in the disk_stat_read() macros calls when compiling
 *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
 *    one-line #define for cpu_isset(), instead of wrapping an inline
 *    inside a macro, the way we do the other calls.
 */
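/*
 * Illustrative sketch only (hypothetical variable names): combining the
 * obsolescent operations documented above.  Masks are passed by value to
 * these macros; the macros take the address internally.
 *
 *      cpumask_t allowed = CPU_MASK_NONE;
 *      cpumask_t runnable;
 *
 *      cpu_set(3, allowed);                    turn on bit 3
 *      cpus_and(runnable, allowed, cpu_online_map);
 *      if (cpus_empty(runnable))
 *              ...                             cpu 3 is offline
 *      else
 *              BUG_ON(!cpu_isset(3, runnable));
 */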
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>

typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern cpumask_t _unused_cpumask_arg_;

#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
        set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
        clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
        bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
        bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
        return test_and_set_bit(cpu, addr->bits);
}

#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
                                __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
static inline void __cpus_complement(cpumask_t *dstp,
                                        const cpumask_t *srcp, int nbits)
{
        bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
        return bitmap_empty(srcp->bits, nbits);
}
#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
        return bitmap_full(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
        return bitmap_weight(srcp->bits, nbits);
}

#define cpus_shift_right(dst, src, n) \
                        __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
                                        const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define cpus_shift_left(dst, src, n) \
                        __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
                                        const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
        cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
        p -= cpu / BITS_PER_LONG;
        return (const cpumask_t *)p;
}

/*
 * In cases where we take the address of the cpumask immediately,
 * gcc optimizes it out (it's a constant) and there's no huge stack
 * variable created:
 */
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
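/*
 * Worked example of the offsetting trick above (illustrative only, assuming
 * BITS_PER_LONG == 64 and NR_CPUS > 66): for cpu == 66, get_cpu_mask()
 * selects row 1 + (66 % 64) == 3 of cpu_bit_bitmap, whose word[0] is
 * 1UL << 2.  Backing the pointer up by 66 / 64 == 1 word makes that word
 * appear at index 1 of the returned mask, i.e. bit 66 overall.  The words
 * read from the tail of the preceding row are all zero, and row 0 is kept
 * all zero, so the back-offset never exposes a stray set bit.
 */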
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

#define CPU_MASK_ALL_PTR        (&CPU_MASK_ALL)

#else

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

/* cpu_mask_all is in init/main.c */
extern cpumask_t cpu_mask_all;
#define CPU_MASK_ALL_PTR        (&cpu_mask_all)

#endif

#define CPU_MASK_NONE                                                   \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL                          \
} }

#define CPU_MASK_CPU0                                                   \
(cpumask_t) { {                                                         \
        [0] = 1UL                                                       \
} }

#define cpus_addr(src) ((src).bits)

#if NR_CPUS > BITS_PER_LONG
#define CPUMASK_ALLOC(m)        struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
#define CPUMASK_FREE(m)         kfree(m)
#else
#define CPUMASK_ALLOC(m)        struct m _m, *m = &_m
#define CPUMASK_FREE(m)
#endif
#define CPUMASK_PTR(v, m)       cpumask_t *v = &(m->v)

#define cpumask_scnprintf(buf, len, src) \
                        __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpumask_scnprintf(char *buf, int len,
                                        const cpumask_t *srcp, int nbits)
{
        return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}

#define cpumask_parse_user(ubuf, ulen, dst) \
                        __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
static inline int __cpumask_parse_user(const char __user *buf, int len,
                                        cpumask_t *dstp, int nbits)
{
        return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define cpulist_scnprintf(buf, len, src) \
                        __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpulist_scnprintf(char *buf, int len,
                                        const cpumask_t *srcp, int nbits)
{
        return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}

#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
{
        return bitmap_parselist(buf, dstp->bits, nbits);
}
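/*
 * Illustrative sketch only: the two text forms round-trip through the
 * helpers above.  With NR_CPUS == 32, a mask with cpus 0-3 and 8 set
 * prints as the zero-padded hex word "0000010f" via cpumask_scnprintf(),
 * and as the range list "0-3,8" via cpulist_scnprintf();
 * cpulist_parse("0-3,8", dst) rebuilds the same mask.
 */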
#define cpu_remap(oldbit, old, new) \
                __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
static inline int __cpu_remap(int oldbit,
                const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
        return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define cpus_remap(dst, src, old, new) \
                __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
                const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
        bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define cpus_onto(dst, orig, relmap) \
                __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
                const cpumask_t *relmapp, int nbits)
{
        bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define cpus_fold(dst, orig, sz) \
                __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
                int sz, int nbits)
{
        bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}

#if NR_CPUS == 1

#define nr_cpu_ids              1
#define first_cpu(src)          ({ (void)(src); 0; })
#define next_cpu(n, src)        ({ (void)(src); 1; })
#define any_online_cpu(mask)    0
#define for_each_cpu_mask(cpu, mask)    \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)

#else /* NR_CPUS > 1 */

extern int nr_cpu_ids;
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);

#define first_cpu(src)          __first_cpu(&(src))
#define next_cpu(n, src)        __next_cpu((n), &(src))
#define any_online_cpu(mask)    __any_online_cpu(&(mask))
#define for_each_cpu_mask(cpu, mask)                    \
        for ((cpu) = -1;                                \
                (cpu) = next_cpu((cpu), (mask)),        \
                (cpu) < NR_CPUS; )
#endif

#if NR_CPUS <= 64

#define next_cpu_nr(n, src)             next_cpu(n, src)
#define cpus_weight_nr(cpumask)         cpus_weight(cpumask)
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)

#else /* NR_CPUS > 64 */

int __next_cpu_nr(int n, const cpumask_t *srcp);
#define next_cpu_nr(n, src)     __next_cpu_nr((n), &(src))
#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
#define for_each_cpu_mask_nr(cpu, mask)                 \
        for ((cpu) = -1;                                \
                (cpu) = next_cpu_nr((cpu), (mask)),     \
                (cpu) < nr_cpu_ids; )

#endif /* NR_CPUS > 64 */
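/*
 * Illustrative sketch only (hypothetical variable names): visiting the set
 * bits of an old-style mask with the bounded iterator.  The loop starts
 * below cpu 0, advances to each set bit in turn, and terminates once
 * next_cpu_nr() reports nr_cpu_ids; the body below runs for cpu 0, then 2.
 *
 *      int cpu;
 *      cpumask_t candidates = CPU_MASK_NONE;
 *
 *      cpu_set(0, candidates);
 *      cpu_set(2, candidates);
 *      for_each_cpu_mask_nr(cpu, candidates)
 *              printk(KERN_DEBUG "considering cpu %d\n", cpu);
 */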
/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.  Each of them is a fixed size
 * bitmap of size NR_CPUS.
 *
 *  #ifdef CONFIG_HOTPLUG_CPU
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_map   - has bit 'cpu' set iff cpu available to migration
 *  #else
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
 *     cpu_present_map  - copy of cpu_possible_map
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #endif
 *
 *  In either case, NR_CPUS is fixed at compile time, as the static
 *  size of these bitmaps.  The cpu_possible_map is fixed at boot
 *  time, as the set of CPU ids that might ever be plugged in at any
 *  time during the life of that system boot.  The cpu_present_map is
 *  dynamic(*), representing which CPUs are currently plugged in.  And
 *  cpu_online_map is the dynamic subset of cpu_present_map, indicating
 *  those CPUs available for scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
 *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_map is just a copy of cpu_possible_map.
 *
 *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
 *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_maps are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 * 2) Most SMP arch's #define some of these maps to be some
 *    other map specific to that arch.  Therefore, the following
 *    must be #define macros, not inlines.  To see why, examine
 *    the assembly code produced by the following.  Note that
 *    set1() writes phys_x_map, but set2() writes x_map:
 *
 *      int x_map, phys_x_map;
 *      #define set1(a) x_map = a
 *      inline void set2(int a) { x_map = a; }
 *      #define x_map phys_x_map
 *      main(){ set1(3); set2(5); }
 */

extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;
extern cpumask_t cpu_active_map;

#if NR_CPUS > 1
#define num_online_cpus()       cpus_weight_nr(cpu_online_map)
#define num_possible_cpus()     cpus_weight_nr(cpu_possible_map)
#define num_present_cpus()      cpus_weight_nr(cpu_present_map)
#define cpu_online(cpu)         cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu)       cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu)        cpu_isset((cpu), cpu_present_map)
#define cpu_active(cpu)         cpu_isset((cpu), cpu_active_map)
#else
#define num_online_cpus()       1
#define num_possible_cpus()     1
#define num_present_cpus()      1
#define cpu_online(cpu)         ((cpu) == 0)
#define cpu_possible(cpu)       ((cpu) == 0)
#define cpu_present(cpu)        ((cpu) == 0)
#define cpu_active(cpu)         ((cpu) == 0)
#endif

#define cpu_is_offline(cpu)     unlikely(!cpu_online(cpu))

#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
#define for_each_online_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_online_map)
#define for_each_present_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_present_map)
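/*
 * Illustrative sketch only: counting online CPUs by hand gives the same
 * answer as num_online_cpus(), since both walk cpu_online_map.
 *
 *      int cpu, n = 0;
 *
 *      for_each_online_cpu(cpu)
 *              n++;
 *      WARN_ON(n != num_online_cpus());
 *
 * (Without hotplug exclusion the two counts could race; callers that care
 * bracket the section with get_online_cpus()/put_online_cpus() from
 * linux/cpu.h.)
 */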
/* These are the new versions of the cpumask operators: passed by pointer.
 * The older versions will be implemented in terms of these, then deleted. */
#define cpumask_bits(maskp) ((maskp)->bits)

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL                                            \
{                                                               \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
}

/* This produces more efficient code. */
#define nr_cpumask_bits NR_CPUS

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL                                            \
{                                                               \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
}

#define nr_cpumask_bits nr_cpu_ids
#endif /* NR_CPUS > BITS_PER_LONG */

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        WARN_ON_ONCE(cpu >= nr_cpumask_bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
        return cpu;
}

#if NR_CPUS == 1
/* Uniprocessor.  Assume all masks are "1". */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
        return 0;
}

/* Valid inputs for n are -1 and 0. */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
        return n+1;
}

static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
        return n+1;
}

static inline unsigned int cpumask_next_and(int n,
                                            const struct cpumask *srcp,
                                            const struct cpumask *andp)
{
        return n+1;
}

/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
                                           unsigned int cpu)
{
        return 1;
}

#define for_each_cpu(cpu, mask)                 \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_and(cpu, mask, and)        \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
#else
/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
        return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
        /* -1 is a legal arg here. */
        if (n != -1)
                cpumask_check(n);
        return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
        /* -1 is a legal arg here. */
        if (n != -1)
                cpumask_check(n);
        return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}

int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)                         \
        for ((cpu) = -1;                                \
                (cpu) = cpumask_next((cpu), (mask)),    \
                (cpu) < nr_cpu_ids;)

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the first cpumask pointer
 * @and: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *      struct cpumask tmp;
 *      cpumask_and(&tmp, &mask, &and);
 *      for_each_cpu(cpu, &tmp)
 *              ...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask, and)                                \
        for ((cpu) = -1;                                                \
                (cpu) = cpumask_next_and((cpu), (mask), (and)),         \
                (cpu) < nr_cpu_ids;)
#endif /* SMP */
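/*
 * Illustrative sketch only (hypothetical names): picking a peer CPU for
 * work that must not run locally.  cpumask_any_but() skips 'this_cpu',
 * and for_each_cpu_and() walks allowed-and-online cpus without building
 * a temporary mask.
 *
 *      int cpu, peer = cpumask_any_but(cpu_online_mask, this_cpu);
 *
 *      if (peer >= nr_cpu_ids)
 *              ...                     no other online cpu
 *      for_each_cpu_and(cpu, allowed, cpu_online_mask)
 *              ...                     cpu is set in both masks
 */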
#define CPU_BITS_NONE                                           \
{                                                               \
        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL                  \
}

#define CPU_BITS_CPU0                                           \
{                                                               \
        [0] = 1UL                                               \
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
        set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
        clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * No static inline type checking - see Subtlety (1) above.
 */
#define cpumask_test_cpu(cpu, cpumask) \
        test_bit(cpumask_check(cpu), (cpumask)->bits)

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * test_and_set_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
        return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_setall(struct cpumask *dstp)
{
        bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
        bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_and(struct cpumask *dstp,
                               const struct cpumask *src1p,
                               const struct cpumask *src2p)
{
        bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
                   cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
                              const struct cpumask *src2p)
{
        bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
                  cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_xor(struct cpumask *dstp,
                               const struct cpumask *src1p,
                               const struct cpumask *src2p)
{
        bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
                   cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_andnot(struct cpumask *dstp,
                                  const struct cpumask *src1p,
                                  const struct cpumask *src2p)
{
        bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
                      cpumask_bits(src2p), nr_cpumask_bits);
}
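/*
 * Illustrative sketch only (hypothetical variable names): composing the
 * pointer-based setters and logic ops above.
 *
 *      struct cpumask wanted;
 *
 *      cpumask_clear(&wanted);
 *      cpumask_set_cpu(0, &wanted);
 *      cpumask_set_cpu(2, &wanted);
 *      cpumask_and(&wanted, &wanted, cpu_online_mask);
 *      ...                     wanted now holds whichever of cpus 0, 2 are online
 */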
/**
 * cpumask_complement - *dstp = ~*srcp
 * @dstp: the cpumask result
 * @srcp: the input to invert
 */
static inline void cpumask_complement(struct cpumask *dstp,
                                      const struct cpumask *srcp)
{
        bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
                          nr_cpumask_bits);
}

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_equal(const struct cpumask *src1p,
                                 const struct cpumask *src2p)
{
        return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
                            nr_cpumask_bits);
}

/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_intersects(const struct cpumask *src1p,
                                      const struct cpumask *src2p)
{
        return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
                                 nr_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 */
static inline int cpumask_subset(const struct cpumask *src1p,
                                 const struct cpumask *src2p)
{
        return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
                             nr_cpumask_bits);
}

/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask to check; true iff all cpus < nr_cpu_ids are clear.
 */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
        return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to check; true iff all cpus < nr_cpu_ids are set.
 */
static inline bool cpumask_full(const struct cpumask *srcp)
{
        return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
        return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_right(struct cpumask *dstp,
                                       const struct cpumask *srcp, int n)
{
        bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
                           nr_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_left(struct cpumask *dstp,
                                      const struct cpumask *srcp, int n)
{
        bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
                          nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(struct cpumask *dstp,
                                const struct cpumask *srcp)
{
        bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}
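/*
 * Illustrative sketch only (hypothetical names): the predicates above
 * compose naturally.  Checking that every allowed cpu is online, then
 * snapshotting and counting the allowed set:
 *
 *      struct cpumask saved;
 *      unsigned int nr;
 *
 *      if (!cpumask_subset(allowed, cpu_online_mask))
 *              ...                     some allowed cpu is offline
 *      cpumask_copy(&saved, allowed);          snapshot before changing it
 *      nr = cpumask_weight(allowed);           how many cpus are allowed
 */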
/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
 */
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (< nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))

/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)                                              \
        ((struct cpumask *)(1 ? (bitmap)                                \
                            : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
        return 1;
}

/**
 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
 *
 * This will eventually be a runtime variable, depending on nr_cpu_ids.
 */
static inline size_t cpumask_size(void)
{
        /* FIXME: Once all cpumask assignments are eliminated, this
         * can be nr_cpumask_bits */
        return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
}

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play!  In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a noop, so it still works.
 *
 * ie.
 *      cpumask_var_t tmpmask;
 *      if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *              return -ENOMEM;
 *
 *      ... use 'tmpmask' like a normal struct cpumask * ...
 *
 *      free_cpumask_var(tmpmask);
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

#else
typedef struct cpumask cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

/* The pointer versions of the maps, these will become the primary versions. */
#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
#define cpu_online_mask   ((const struct cpumask *)&cpu_online_map)
#define cpu_present_mask  ((const struct cpumask *)&cpu_present_map)
#define cpu_active_mask   ((const struct cpumask *)&cpu_active_map)

/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
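/*
 * Illustrative sketch only (hypothetical names): to_cpumask() lets a
 * static NR_CPUS bitmap stand in for a struct cpumask, e.g. in a struct
 * member initializer, just as cpu_all_mask and cpu_none_mask do above.
 *
 *      static DECLARE_BITMAP(quiet_bits, NR_CPUS) = CPU_BITS_NONE;
 *      struct my_policy p = { .mask = to_cpumask(quiet_bits) };
 */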
/* Wrappers for arch boot code to manipulate normally-constant masks */
static inline void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, &cpu_possible_map);
        else
                cpumask_clear_cpu(cpu, &cpu_possible_map);
}

static inline void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, &cpu_present_map);
        else
                cpumask_clear_cpu(cpu, &cpu_present_map);
}

static inline void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, &cpu_online_map);
        else
                cpumask_clear_cpu(cpu, &cpu_online_map);
}

static inline void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, &cpu_active_map);
        else
                cpumask_clear_cpu(cpu, &cpu_active_map);
}

static inline void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(&cpu_present_map, src);
}

static inline void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(&cpu_possible_map, src);
}

static inline void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(&cpu_online_map, src);
}

#endif /* __LINUX_CPUMASK_H */