// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997, 1998, 1999, 2000, 2003
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

// Written by Gabriel Dos Reis <[email protected]>

/** @file valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <cstring>
#include <new>

namespace std
{
  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp* __restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp* __restrict__>
	(std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system.
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
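  // Illustrative sketch (not itself part of the library interface): the
  // storage obtained above is raw, uninitialized memory, so a typical
  // lifetime pairs the helpers defined in this file roughly as
  //
  //   _Tp* __restrict__ __p = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_default_construct(__p, __p + __n);  // or fill/copy
  //   // ... use the elements ...
  //   std::__valarray_destroy_elements(__p, __p + __n);
  //   std::__valarray_release_memory(__p);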
  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required in 'valarray<T> v(n);'.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      { while (__b != __e) new(__b++) _Tp(); }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      { std::memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      _Array_default_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
	_S_do_it(__b, __e);
    }

  // Turn raw memory into an array of _Tp filled with __t.
  // This is required in 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      { while (__b != __e) new(__b++) _Tp(__t); }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      { while (__b != __e) *__b++ = __t; }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e,
			      const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
	_S_do_it(__b, __e, __t);
    }

  //
  // Copy-construct raw array [__o, *) from plain array [__b, __e).
  // We can't just say 'memcpy()'.
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
	       _Tp* __restrict__ __o)
      { while (__b != __e) new(__o++) _Tp(*__b++); }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
	       _Tp* __restrict__ __o)
      { std::memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __b,
			      const _Tp* __restrict__ __e,
			      _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
	_S_do_it(__b, __e, __o);
    }

  // Copy-construct raw array [__o, *) from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a, size_t __n,
			      size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_fundamental<_Tp>::_M_type)
	while (__n--) { *__o++ = *__a; __a += __s; }
      else
	while (__n--) { new(__o++) _Tp(*__a); __a += __s; }
    }

  // Copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a,
			      const size_t* __restrict__ __i,
			      _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_fundamental<_Tp>::_M_type)
	while (__n--) *__o++ = __a[*__i++];
      else
	while (__n--) new (__o++) _Tp(__a[*__i++]);
    }
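  // Note: the construction helpers above all dispatch on
  // __is_fundamental<_Tp>::_M_type, so for a fundamental type such as
  // 'double' a call like std::__valarray_copy_construct(__b, __e, __o)
  // is expected to select the memcpy() specialization, while class types
  // take the placement-new loop.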
  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      if (!__is_fundamental<_Tp>::_M_type)
	while (__b != __e) { __b->~_Tp(); ++__b; }
    }

  // Fill a plain array __a[<__n>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    { while (__n--) *__a++ = __t; }

  // Fill a strided array __a[<__n : __s>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
		    size_t __s, const _Tp& __t)
    { for (size_t __i = 0; __i < __n; ++__i, __a += __s) *__a = __t; }

  // Fill an indirect array __a[__i[<__n>]] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
		    size_t __n, const _Tp& __t)
    { for (size_t __j = 0; __j < __n; ++__j, ++__i) __a[*__i] = __t; }

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>].
  // For non-fundamental types, it is wrong to say 'memcpy()'.
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { while (__n--) *__b++ = *__a++; }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { std::memcpy(__b, __a, __n * sizeof(_Tp)); }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
		    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_fundamental<_Tp>::_M_type>::
	_S_do_it(__a, __n, __b);
    }

  // Copy a strided array __a[<__n : __s>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
		    _Tp* __restrict__ __b)
    { for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s) *__b = *__a; }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
		    size_t __n, size_t __s)
    { for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s) *__b = *__a; }

  // Copy a strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
		    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
	__dst[__i * __s2] = __src[__i * __s1];
    }

  // Copy an indexed array __a[__i[<__n>]] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
		    const size_t* __restrict__ __i,
		    _Tp* __restrict__ __b, size_t __n)
    { for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i) *__b = __a[*__i]; }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
		    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    { for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i) __b[*__i] = *__a; }
  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
		    const size_t* __restrict__ __i,
		    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
	__dst[*__j++] = __src[*__i++];
    }

  //
  // Compute the sum of elements in the range [__f, __l).
  // This is a naive algorithm; it suffers from cancellation.
  // In the future, try to specialize it for _Tp = float, double,
  // long double using a more accurate algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp();
      while (__f != __l) __r += *__f++;
      return __r;
    }

  // Compute the product of all elements in the range [__f, __l).
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __restrict__ __f,
		       const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l) __r = __r * *__f++;
      return __r;
    }
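  // Illustrative note on the remark above: the accumulation is a plain
  // left-to-right loop, so in double arithmetic summing {1e16, 1.0, -1e16}
  // yields 0.0 rather than the exact 1.0, the middle term being lost to
  // rounding before the cancellation.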
  // Compute the min/max of an array-expression.
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
	{
	  _Value_type __t = __a[__i];
	  if (__t < __r)
	    __r = __t;
	}
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
	{
	  _Value_type __t = __a[__i];
	  if (__t > __r)
	    __r = __t;
	}
      return __r;
    }

  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };
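  // Note: _Array is only a thin wrapper around a __restrict__-qualified
  // pointer; it has no destructor, so any storage acquired by the
  // _Array(size_t) and _Array(const _Tp*, size_t) constructors defined
  // below is expected to be released by the owner (e.g. valarray), not by
  // _Array itself.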
  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
		    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy a strided array __a[<__n : __s>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy a strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
		    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
		    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
		    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
		    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
			   __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data(__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data(__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin() const
    { return _M_data; }

#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
	*__p _Op##= __t; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
	*__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
			     const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
	*__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
			     _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s*__n; \
	   __p += __s, ++__q) \
	*__p _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
			     size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
	   ++__p, __q += __s) \
	*__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
			     const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
	*__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
			     _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; ++__j, ++__q) \
	__a._M_data[*__j] _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
			     _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; ++__j, ++__p) \
	*__p _Op##= __b._M_data[*__j]; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
			     const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k < __n; ++__k, ++__j) \
	__a._M_data[*__j] _Op##= __e[__k]; \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
			     _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
	   ++__q, ++__ok, ++__p) \
	{ \
	  while (!*__ok) \
	    { \
	      ++__ok; \
	      ++__p; \
	    } \
	  *__p _Op##= *__q; \
	} \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
			     _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
	   ++__p, ++__ok, ++__q) \
	{ \
	  while (!*__ok) \
	    { \
	      ++__ok; \
	      ++__q; \
	    } \
	  *__p _Op##= *__q; \
	} \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
			     const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
	{ \
	  while (!*__ok) \
	    { \
	      ++__ok; \
	      ++__p; \
	    } \
	  *__p _Op##= __e[__i]; \
	} \
    }
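  // Each _DEFINE_ARRAY_FUNCTION expansion below emits the family of
  // _Array_augmented_* helpers for one operator; for instance,
  // _DEFINE_ARRAY_FUNCTION(+, __plus) defines the _Array_augmented___plus
  // overloads, presumably forwarded to by valarray's operator+= and the
  // corresponding expression-template assignments defined elsewhere.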
  _DEFINE_ARRAY_FUNCTION(+, __plus)
  _DEFINE_ARRAY_FUNCTION(-, __minus)
  _DEFINE_ARRAY_FUNCTION(*, __multiplies)
  _DEFINE_ARRAY_FUNCTION(/, __divides)
  _DEFINE_ARRAY_FUNCTION(%, __modulus)
  _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
  _DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
  _DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
  _DEFINE_ARRAY_FUNCTION(<<, __shift_left)
  _DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION
} // namespace std

#ifndef _GLIBCXX_EXPORT_TEMPLATE
# include <bits/valarray_array.tcc>
#endif

#endif /* _VALARRAY_ARRAY_H */