// bits/valarray_array.h
  1. // The template and inlines for the -*- C++ -*- internal _Array helper class.
  2. // Copyright (C) 1997-2018 Free Software Foundation, Inc.
  3. //
  4. // This file is part of the GNU ISO C++ Library. This library is free
  5. // software; you can redistribute it and/or modify it under the
  6. // terms of the GNU General Public License as published by the
  7. // Free Software Foundation; either version 3, or (at your option)
  8. // any later version.
  9. // This library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. // Under Section 7 of GPL version 3, you are granted additional
  14. // permissions described in the GCC Runtime Library Exception, version
  15. // 3.1, as published by the Free Software Foundation.
  16. // You should have received a copy of the GNU General Public License and
  17. // a copy of the GCC Runtime Library Exception along with this program;
  18. // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  19. // <http://www.gnu.org/licenses/>.
  20. /** @file bits/valarray_array.h
  21. * This is an internal header file, included by other library headers.
  22. * Do not attempt to use it directly. @headername{valarray}
  23. */
  24. // Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>
  25. #ifndef _VALARRAY_ARRAY_H
  26. #define _VALARRAY_ARRAY_H 1
  27. #pragma GCC system_header
  28. #include <bits/c++config.h>
  29. #include <bits/cpp_type_traits.h>
  30. #include <cstdlib>
  31. #include <new>
  32. namespace std _GLIBCXX_VISIBILITY(default)
  33. {
  34. _GLIBCXX_BEGIN_NAMESPACE_VERSION
  35. //
  36. // Helper functions on raw pointers
  37. //
  38. // We get memory by the old fashion way
  39. inline void*
  40. __valarray_get_memory(size_t __n)
  41. { return operator new(__n); }
  42. template<typename _Tp>
  43. inline _Tp*__restrict__
  44. __valarray_get_storage(size_t __n)
  45. {
  46. return static_cast<_Tp*__restrict__>
  47. (std::__valarray_get_memory(__n * sizeof(_Tp)));
  48. }
  49. // Return memory to the system
  50. inline void
  51. __valarray_release_memory(void* __p)
  52. { operator delete(__p); }
  53. // Turn a raw-memory into an array of _Tp filled with _Tp()
  54. // This is required in 'valarray<T> v(n);'
  55. template<typename _Tp, bool>
  56. struct _Array_default_ctor
  57. {
  58. // Please note that this isn't exception safe. But
  59. // valarrays aren't required to be exception safe.
  60. inline static void
  61. _S_do_it(_Tp* __b, _Tp* __e)
  62. {
  63. while (__b != __e)
  64. new(__b++) _Tp();
  65. }
  66. };
  67. template<typename _Tp>
  68. struct _Array_default_ctor<_Tp, true>
  69. {
  70. // For fundamental types, it suffices to say 'memset()'
  71. inline static void
  72. _S_do_it(_Tp* __b, _Tp* __e)
  73. { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
  74. };
  75. template<typename _Tp>
  76. inline void
  77. __valarray_default_construct(_Tp* __b, _Tp* __e)
  78. {
  79. _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
  80. }
  81. // Turn a raw-memory into an array of _Tp filled with __t
  82. // This is the required in valarray<T> v(n, t). Also
  83. // used in valarray<>::resize().
  84. template<typename _Tp, bool>
  85. struct _Array_init_ctor
  86. {
  87. // Please note that this isn't exception safe. But
  88. // valarrays aren't required to be exception safe.
  89. inline static void
  90. _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
  91. {
  92. while (__b != __e)
  93. new(__b++) _Tp(__t);
  94. }
  95. };
  96. template<typename _Tp>
  97. struct _Array_init_ctor<_Tp, true>
  98. {
  99. inline static void
  100. _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
  101. {
  102. while (__b != __e)
  103. *__b++ = __t;
  104. }
  105. };
  106. template<typename _Tp>
  107. inline void
  108. __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
  109. {
  110. _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
  111. }
  112. //
  113. // copy-construct raw array [__o, *) from plain array [__b, __e)
  114. // We can't just say 'memcpy()'
  115. //
  116. template<typename _Tp, bool>
  117. struct _Array_copy_ctor
  118. {
  119. // Please note that this isn't exception safe. But
  120. // valarrays aren't required to be exception safe.
  121. inline static void
  122. _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
  123. {
  124. while (__b != __e)
  125. new(__o++) _Tp(*__b++);
  126. }
  127. };
  128. template<typename _Tp>
  129. struct _Array_copy_ctor<_Tp, true>
  130. {
  131. inline static void
  132. _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
  133. {
  134. if (__b)
  135. __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp));
  136. }
  137. };
  138. template<typename _Tp>
  139. inline void
  140. __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
  141. _Tp* __restrict__ __o)
  142. {
  143. _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
  144. }
  145. // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
  146. template<typename _Tp>
  147. inline void
  148. __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
  149. size_t __s, _Tp* __restrict__ __o)
  150. {
  151. if (__is_trivial(_Tp))
  152. while (__n--)
  153. {
  154. *__o++ = *__a;
  155. __a += __s;
  156. }
  157. else
  158. while (__n--)
  159. {
  160. new(__o++) _Tp(*__a);
  161. __a += __s;
  162. }
  163. }
  164. // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
  165. template<typename _Tp>
  166. inline void
  167. __valarray_copy_construct (const _Tp* __restrict__ __a,
  168. const size_t* __restrict__ __i,
  169. _Tp* __restrict__ __o, size_t __n)
  170. {
  171. if (__is_trivial(_Tp))
  172. while (__n--)
  173. *__o++ = __a[*__i++];
  174. else
  175. while (__n--)
  176. new (__o++) _Tp(__a[*__i++]);
  177. }
  178. // Do the necessary cleanup when we're done with arrays.
  179. template<typename _Tp>
  180. inline void
  181. __valarray_destroy_elements(_Tp* __b, _Tp* __e)
  182. {
  183. if (!__is_trivial(_Tp))
  184. while (__b != __e)
  185. {
  186. __b->~_Tp();
  187. ++__b;
  188. }
  189. }
  190. // Fill a plain array __a[<__n>] with __t
  191. template<typename _Tp>
  192. inline void
  193. __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
  194. {
  195. while (__n--)
  196. *__a++ = __t;
  197. }
  198. // fill strided array __a[<__n-1 : __s>] with __t
  199. template<typename _Tp>
  200. inline void
  201. __valarray_fill(_Tp* __restrict__ __a, size_t __n,
  202. size_t __s, const _Tp& __t)
  203. {
  204. for (size_t __i = 0; __i < __n; ++__i, __a += __s)
  205. *__a = __t;
  206. }
  207. // fill indirect array __a[__i[<__n>]] with __i
  208. template<typename _Tp>
  209. inline void
  210. __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
  211. size_t __n, const _Tp& __t)
  212. {
  213. for (size_t __j = 0; __j < __n; ++__j, ++__i)
  214. __a[*__i] = __t;
  215. }
  216. // copy plain array __a[<__n>] in __b[<__n>]
  217. // For non-fundamental types, it is wrong to say 'memcpy()'
  218. template<typename _Tp, bool>
  219. struct _Array_copier
  220. {
  221. inline static void
  222. _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
  223. {
  224. while(__n--)
  225. *__b++ = *__a++;
  226. }
  227. };
  228. template<typename _Tp>
  229. struct _Array_copier<_Tp, true>
  230. {
  231. inline static void
  232. _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
  233. {
  234. if (__n != 0)
  235. __builtin_memcpy(__b, __a, __n * sizeof (_Tp));
  236. }
  237. };
  238. // Copy a plain array __a[<__n>] into a play array __b[<>]
  239. template<typename _Tp>
  240. inline void
  241. __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
  242. _Tp* __restrict__ __b)
  243. {
  244. _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
  245. }
  246. // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
  247. template<typename _Tp>
  248. inline void
  249. __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
  250. _Tp* __restrict__ __b)
  251. {
  252. for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
  253. *__b = *__a;
  254. }
  255. // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  256. template<typename _Tp>
  257. inline void
  258. __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
  259. size_t __n, size_t __s)
  260. {
  261. for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
  262. *__b = *__a;
  263. }
  264. // Copy strided array __src[<__n : __s1>] into another
  265. // strided array __dst[< : __s2>]. Their sizes must match.
  266. template<typename _Tp>
  267. inline void
  268. __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
  269. _Tp* __restrict__ __dst, size_t __s2)
  270. {
  271. for (size_t __i = 0; __i < __n; ++__i)
  272. __dst[__i * __s2] = __src[__i * __s1];
  273. }
  274. // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
  275. template<typename _Tp>
  276. inline void
  277. __valarray_copy(const _Tp* __restrict__ __a,
  278. const size_t* __restrict__ __i,
  279. _Tp* __restrict__ __b, size_t __n)
  280. {
  281. for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
  282. *__b = __a[*__i];
  283. }
  284. // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
  285. template<typename _Tp>
  286. inline void
  287. __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
  288. _Tp* __restrict__ __b, const size_t* __restrict__ __i)
  289. {
  290. for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
  291. __b[*__i] = *__a;
  292. }
  293. // Copy the __n first elements of an indexed array __src[<__i>] into
  294. // another indexed array __dst[<__j>].
  295. template<typename _Tp>
  296. inline void
  297. __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
  298. const size_t* __restrict__ __i,
  299. _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
  300. {
  301. for (size_t __k = 0; __k < __n; ++__k)
  302. __dst[*__j++] = __src[*__i++];
  303. }
  304. //
  305. // Compute the sum of elements in range [__f, __l) which must not be empty.
  306. // This is a naive algorithm. It suffers from cancelling.
  307. // In the future try to specialize for _Tp = float, double, long double
  308. // using a more accurate algorithm.
  309. //
  310. template<typename _Tp>
  311. inline _Tp
  312. __valarray_sum(const _Tp* __f, const _Tp* __l)
  313. {
  314. _Tp __r = *__f++;
  315. while (__f != __l)
  316. __r += *__f++;
  317. return __r;
  318. }
  319. // Compute the product of all elements in range [__f, __l)
  320. template<typename _Tp>
  321. inline _Tp
  322. __valarray_product(const _Tp* __f, const _Tp* __l)
  323. {
  324. _Tp __r = _Tp(1);
  325. while (__f != __l)
  326. __r = __r * *__f++;
  327. return __r;
  328. }
  329. // Compute the min/max of an array-expression
  330. template<typename _Ta>
  331. inline typename _Ta::value_type
  332. __valarray_min(const _Ta& __a)
  333. {
  334. size_t __s = __a.size();
  335. typedef typename _Ta::value_type _Value_type;
  336. _Value_type __r = __s == 0 ? _Value_type() : __a[0];
  337. for (size_t __i = 1; __i < __s; ++__i)
  338. {
  339. _Value_type __t = __a[__i];
  340. if (__t < __r)
  341. __r = __t;
  342. }
  343. return __r;
  344. }
  345. template<typename _Ta>
  346. inline typename _Ta::value_type
  347. __valarray_max(const _Ta& __a)
  348. {
  349. size_t __s = __a.size();
  350. typedef typename _Ta::value_type _Value_type;
  351. _Value_type __r = __s == 0 ? _Value_type() : __a[0];
  352. for (size_t __i = 1; __i < __s; ++__i)
  353. {
  354. _Value_type __t = __a[__i];
  355. if (__t > __r)
  356. __r = __t;
  357. }
  358. return __r;
  359. }
//
// Helper class _Array, first layer of valarray abstraction.
// All operations on valarray should be forwarded to this class
// whenever possible. -- gdr
//
// _Array is a thin handle around a bare pointer. It has no destructor,
// so any storage its allocating constructors obtain is released by the
// owner (presumably valarray — confirm in <bits/valarray_array.tcc>).
template<typename _Tp>
  struct _Array
  {
    explicit _Array(size_t);                   // allocate and default-construct
    explicit _Array(_Tp* const __restrict__);  // adopt an existing buffer
    explicit _Array(const valarray<_Tp>&);     // view a valarray's storage
    _Array(const _Tp* __restrict__, size_t);   // allocate and copy from plain array
    _Tp* begin() const;                        // pointer to the first element
    _Tp* const __restrict__ _M_data;           // the wrapped data pointer
  };
// Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]].
// Forwards to the raw-pointer overload through the arrays' _M_data.
template<typename _Tp>
  inline void
  __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
			    _Array<_Tp> __b, size_t __n)
  { std::__valarray_copy_construct(__a._M_data, __i._M_data,
				   __b._M_data, __n); }
// Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>].
// Forwards to the raw-pointer overload through the arrays' _M_data.
template<typename _Tp>
  inline void
  __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
			    _Array<_Tp> __b)
  { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }
// Fill plain array __a[<__n>] with __t (forwarder to the pointer overload).
template<typename _Tp>
  inline void
  __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t)
  { std::__valarray_fill(__a._M_data, __n, __t); }
// Fill strided array __a[<__n : __s>] with __t (forwarder).
template<typename _Tp>
  inline void
  __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
  { std::__valarray_fill(__a._M_data, __n, __s, __t); }
// Fill indexed array __a[__i[<__n>]] with __t (forwarder).
template<typename _Tp>
  inline void
  __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
		  size_t __n, const _Tp& __t)
  { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }
// Copy a plain array __a[<__n>] into a plain array __b[<>] (forwarder).
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
  { std::__valarray_copy(__a._M_data, __n, __b._M_data); }
// Copy strided array __a[<__n : __s>] in plain __b[<__n>] (forwarder).
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
  { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }
  411. // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  412. template<typename _Tp>
  413. inline void
  414. __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
  415. { __valarray_copy(__a._M_data, __b._M_data, __n, __s); }
// Copy strided array __src[<__n : __s1>] into another
// strided array __dst[< : __s2>]. Their sizes must match. (Forwarder.)
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
		  _Array<_Tp> __b, size_t __s2)
  { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }
// Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]. (Forwarder.)
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
		  _Array<_Tp> __b, size_t __n)
  { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }
// Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]. (Forwarder.)
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
		  _Array<size_t> __i)
  { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }
// Copy the __n first elements of an indexed array __src[<__i>] into
// another indexed array __dst[<__j>]. (Forwarder.)
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
		  _Array<_Tp> __dst, _Array<size_t> __j)
  {
    std::__valarray_copy(__src._M_data, __n, __i._M_data,
			 __dst._M_data, __j._M_data);
  }
// Allocate storage for __n elements and default-construct them.
template<typename _Tp>
  inline
  _Array<_Tp>::_Array(size_t __n)
  : _M_data(__valarray_get_storage<_Tp>(__n))
  { std::__valarray_default_construct(_M_data, _M_data + __n); }
// Adopt an existing buffer __p; no allocation takes place.
template<typename _Tp>
  inline
  _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
  : _M_data (__p) {}
// View the storage of an existing valarray; no allocation takes place.
template<typename _Tp>
  inline
  _Array<_Tp>::_Array(const valarray<_Tp>& __v)
  : _M_data (__v._M_data) {}
// Allocate storage for __s elements and copy-construct them from
// the plain array __b[<__s>].
template<typename _Tp>
  inline
  _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
  : _M_data(__valarray_get_storage<_Tp>(__s))
  { std::__valarray_copy_construct(__b, __s, _M_data); }
// Pointer to the first element of the wrapped storage.
template<typename _Tp>
  inline _Tp*
  _Array<_Tp>::begin () const
  { return _M_data; }
// _DEFINE_ARRAY_FUNCTION(_Op, _Name) expands to a family of
// _Array_augmented_##_Name overloads that apply the compound assignment
// 'lhs _Op= rhs' across the addressing forms used by valarray:
//   - plain array  op= scalar / plain array / _Expr expression
//   - strided array op= plain array (and vice versa) / _Expr expression
//   - indexed array (through an _Array<size_t> of indices) op= plain
//     array / _Expr expression, in either direction
//   - masked array (through an _Array<bool>) op= plain array / _Expr
//     expression; the mask overloads step over 'false' entries so that
//     exactly __n positions where the mask is true are updated.
// Comments cannot appear inside the macro body (a '//' would comment
// out the line-continuation backslash), hence this header note.
// The macro is invoked once per operator below and then #undef'd.
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name)				\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    {									\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)	\
	*__p _Op##= __t;						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    {									\
      _Tp* __p = __a._M_data;						\
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
	*__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a,				\
			     const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, ++__p)			\
	*__p _Op##= __e[__i];						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s,	\
			     _Array<_Tp> __b)				\
    {									\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n;	\
	   __p += __s, ++__q)						\
	*__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b,		\
			     size_t __n, size_t __s)			\
    {									\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;		\
	   ++__p, __q += __s)						\
	*__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s,		\
			     const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, __p += __s)		\
	*__p _Op##= __e[__i];						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,	\
			     _Array<_Tp> __b, size_t __n)		\
    {									\
      _Tp* __q(__b._M_data);						\
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n;		\
	   ++__j, ++__q)						\
	__a._M_data[*__j] _Op##= *__q;					\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,		\
			     _Array<_Tp> __b, _Array<size_t> __i)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n;		\
	   ++__j, ++__p)						\
	*__p _Op##= __b._M_data[*__j];					\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,	\
			     const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      size_t* __j(__i._M_data);						\
      for (size_t __k = 0; __k < __n; ++__k, ++__j)			\
	__a._M_data[*__j] _Op##= __e[__k];				\
    }									\
									\
  template<typename _Tp>						\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,		\
			     _Array<_Tp> __b, size_t __n)		\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __p(__a._M_data);						\
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n;		\
	   ++__q, ++__ok, ++__p)					\
	{								\
	  while (! *__ok)						\
	    {								\
	      ++__ok;							\
	      ++__p;							\
	    }								\
	  *__p _Op##= *__q;						\
	}								\
    }									\
									\
  template<typename _Tp>						\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,		\
			     _Array<_Tp> __b, _Array<bool> __m)		\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;		\
	   ++__p, ++__ok, ++__q)					\
	{								\
	  while (! *__ok)						\
	    {								\
	      ++__ok;							\
	      ++__q;							\
	    }								\
	  *__p _Op##= *__q;						\
	}								\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,		\
			     const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p)		\
	{								\
	  while (! *__ok)						\
	    {								\
	      ++__ok;							\
	      ++__p;							\
	    }								\
	  *__p _Op##= __e[__i];						\
	}								\
    }
// Instantiate the helper family for each supported compound-assignment
// operator, then discard the generator macro.
_DEFINE_ARRAY_FUNCTION(+, __plus)
_DEFINE_ARRAY_FUNCTION(-, __minus)
_DEFINE_ARRAY_FUNCTION(*, __multiplies)
_DEFINE_ARRAY_FUNCTION(/, __divides)
_DEFINE_ARRAY_FUNCTION(%, __modulus)
_DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
_DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
_DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
_DEFINE_ARRAY_FUNCTION(<<, __shift_left)
_DEFINE_ARRAY_FUNCTION(>>, __shift_right)
#undef _DEFINE_ARRAY_FUNCTION
  625. _GLIBCXX_END_NAMESPACE_VERSION
  626. } // namespace
  627. # include <bits/valarray_array.tcc>
#endif /* _VALARRAY_ARRAY_H */