/****************************  instrset.h  **********************************
 * Author:        Agner Fog
 * Date created:  2012-05-30
 * Last modified: 2020-11-11
 * Version:       2.01.02
 * Project:       vector class library
 * Description:
 * Header file for various compiler-specific tasks as well as common
 * macros and templates. This file contains:
 *
 * > Selection of the supported instruction set
 * > Defines compiler version macros
 * > Undefines certain macros that prevent function overloading
 * > Helper functions that depend on instruction set, compiler, or platform
 * > Common templates for permute, blend, etc.
 *
 * For instructions, see vcl_manual.pdf
 *
 * (c) Copyright 2012-2020 Agner Fog.
 * Apache License version 2.0 or later.
 ******************************************************************************/

#pragma once

// Allow the use of floating point permute instructions on integer vectors.
// Some CPUs have an extra latency of 1 or 2 clock cycles for this, but
// it may still be faster than alternative implementations:
#define ALLOW_FP_PERMUTE true

// Macro to indicate 64 bit mode
#if ( defined( _M_AMD64 ) || defined( _M_X64 ) || defined( __amd64 ) ) && !defined( __x86_64__ )
#  define __x86_64__ 1 // There are many different macros for this, decide on only one
#endif

// The following values of INSTRSET are currently defined:
// 2:  SSE2
// 3:  SSE3
// 4:  SSSE3
// 5:  SSE4.1
// 6:  SSE4.2
// 7:  AVX
// 8:  AVX2
// 9:  AVX512F
// 10: AVX512BW/DQ/VL
// In the future, INSTRSET = 11 may include AVX512VBMI and AVX512VBMI2, but this
// decision cannot be made before the market situation for CPUs with these
// instruction sets is known (these future instruction set extensions are already
// used in some VCL functions and tested with an emulator)

// Find instruction set from compiler macros if INSTRSET is not defined.
// Note: Most of these macros are not defined in Microsoft compilers
#ifndef INSTRSET
#  if defined( __AVX512VL__ ) && defined( __AVX512BW__ ) && defined( __AVX512DQ__ )
#    define INSTRSET 10
#  elif defined( __AVX512F__ ) || defined( __AVX512__ )
#    define INSTRSET 9
#  elif defined( __AVX2__ )
#    define INSTRSET 8
#  elif defined( __AVX__ )
#    define INSTRSET 7
#  elif defined( __SSE4_2__ )
#    define INSTRSET 6
#  elif defined( __SSE4_1__ )
#    define INSTRSET 5
#  elif defined( __SSSE3__ )
#    define INSTRSET 4
#  elif defined( __SSE3__ )
#    define INSTRSET 3
#  elif defined( __SSE2__ ) || defined( __x86_64__ )
#    define INSTRSET 2
#  elif defined( __SSE__ )
#    define INSTRSET 1
#  elif defined( _M_IX86_FP ) // Defined in MS compiler. 1: SSE, 2: SSE2
#    define INSTRSET _M_IX86_FP
#  else
#    define INSTRSET 0
#  endif // instruction set defines
#endif   // INSTRSET
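
// Usage sketch: INSTRSET is normally derived from the compiler's own
// architecture macros, so it is selected on the command line, e.g.
//   g++ -O2 -mavx2 -mfma ...       -> __AVX2__ defined       -> INSTRSET 8
//   g++ -O2 -march=skylake-avx512  -> AVX512BW/DQ/VL defined -> INSTRSET 10
// Because of the #ifndef above, it can also be pinned explicitly:
//   #define INSTRSET 7   // force the AVX code path
//   #include "instrset.h"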

// Include the appropriate header file for intrinsic functions
#if INSTRSET > 7 // AVX2 and later
#  if defined( __GNUC__ ) && !defined( __INTEL_COMPILER )
#    include <x86intrin.h> // x86intrin.h includes header files for whatever instruction
                           // sets are specified on the compiler command line, such as:
                           // xopintrin.h, fma4intrin.h
#  else
#    include <immintrin.h> // MS/Intel version of immintrin.h covers AVX and later
#  endif // __GNUC__
#elif INSTRSET == 7
#  include <immintrin.h> // AVX
#elif INSTRSET == 6
#  include <nmmintrin.h> // SSE4.2
#elif INSTRSET == 5
#  include <smmintrin.h> // SSE4.1
#elif INSTRSET == 4
#  include <tmmintrin.h> // SSSE3
#elif INSTRSET == 3
#  include <pmmintrin.h> // SSE3
#elif INSTRSET == 2
#  include <emmintrin.h> // SSE2
#elif INSTRSET == 1
#  include <xmmintrin.h> // SSE
#endif // INSTRSET

#if INSTRSET >= 8 && !defined( __FMA__ )
// Assume that all processors that have AVX2 also have FMA3
#  if defined( __GNUC__ ) && !defined( __INTEL_COMPILER )
// Prevent error message in g++ and Clang when using FMA intrinsics with avx2:
#    if !defined( DISABLE_WARNING_AVX2_WITHOUT_FMA )
#      pragma message "It is recommended to also specify option -mfma when using -mavx2 or higher"
#    endif
#  elif !defined( __clang__ )
#    define __FMA__ 1
#  endif
#endif

// AMD instruction sets
#if defined( __XOP__ ) || defined( __FMA4__ )
#  ifdef __GNUC__
#    include <x86intrin.h> // AMD XOP (Gnu)
#  else
#    include <ammintrin.h> // AMD XOP (Microsoft)
#  endif // __GNUC__
#elif defined( __SSE4A__ ) // AMD SSE4A
#  include <ammintrin.h>
#endif // __XOP__

// FMA3 instruction set
#if defined( __FMA__ ) && ( defined( __GNUC__ ) || defined( __clang__ ) ) && !defined( __INTEL_COMPILER )
#  include <fmaintrin.h>
#endif // __FMA__

// FMA4 instruction set
#if defined( __FMA4__ ) && ( defined( __GNUC__ ) || defined( __clang__ ) )
#  include <fma4intrin.h> // must have both x86intrin.h and fma4intrin.h, don't know why
#endif // __FMA4__

#include <stdint.h> // Define integer types with known size
#include <stdlib.h> // define abs(int)

#ifdef _MSC_VER // Microsoft compiler or compatible Intel compiler
#  include <intrin.h> // define _BitScanReverse(int), __cpuid(int[4],int), _xgetbv(int)
#endif // _MSC_VER

// functions in instrset_detect.cpp:
#ifdef VCL_NAMESPACE
namespace VCL_NAMESPACE {
#endif
  int  instrset_detect( void ); // tells which instruction sets are supported
  bool hasFMA3( void );         // true if FMA3 instructions supported
  bool hasFMA4( void );         // true if FMA4 instructions supported
  bool hasXOP( void );          // true if XOP instructions supported
  bool hasAVX512ER( void );     // true if AVX512ER instructions supported
  bool hasAVX512VBMI( void );   // true if AVX512VBMI instructions supported
  bool hasAVX512VBMI2( void );  // true if AVX512VBMI2 instructions supported
#ifdef VCL_NAMESPACE
}
#endif

// functions in physical_processors.cpp:
int physicalProcessors( int* logical_processors = 0 );

// GCC version
#if defined( __GNUC__ ) && !defined( GCC_VERSION ) && !defined( __clang__ )
#  define GCC_VERSION ( (__GNUC__)*10000 + (__GNUC_MINOR__)*100 + (__GNUC_PATCHLEVEL__) )
#endif

// Clang version
#if defined( __clang__ )
#  define CLANG_VERSION ( (__clang_major__)*10000 + (__clang_minor__)*100 + (__clang_patchlevel__) )
// Problem: The version number is not consistent across platforms
// http://llvm.org/bugs/show_bug.cgi?id=12643
// Apple bug 18746972
#endif

// Fix problem with non-overloadable macros named min and max in WinDef.h
#ifdef _MSC_VER
#  if defined( _WINDEF_ ) && defined( min ) && defined( max )
#    undef min
#    undef max
#  endif
#  ifndef NOMINMAX
#    define NOMINMAX
#  endif
#endif

/* Intel compiler problem:
The Intel compiler currently cannot compile version 2.00 of VCL. It seems to have
a problem with constexpr function returns not being constant enough.
*/
#if defined( __INTEL_COMPILER ) && __INTEL_COMPILER < 9999
#  error The Intel compiler version 19.00 cannot compile VCL version 2. Use Version 1.xx of VCL instead
#endif

/* Clang problem:
The Clang compiler treats the intrinsic vector types __m128, __m128i, and __m128d as identical.
See the bug report at https://bugs.llvm.org/show_bug.cgi?id=17164
Additional problem: The version number is not consistent across platforms. The Apple build has
different version numbers. We have to rely on __apple_build_version__ on the Mac platform:
http://llvm.org/bugs/show_bug.cgi?id=12643
We will have to add version switches here when, hopefully, the error is fixed some day.
We need different version checks with and without __apple_build_version__
*/
#if ( defined( __clang__ ) || defined( __apple_build_version__ ) ) && !defined( __INTEL_COMPILER )
#  define FIX_CLANG_VECTOR_ALIAS_AMBIGUITY
#endif

#if defined( GCC_VERSION ) && GCC_VERSION < 99999 && !defined( __clang__ )
#  define ZEXT_MISSING // Gcc 7.4.0 does not have _mm256_zextsi128_si256 and similar functions
#endif

#ifdef VCL_NAMESPACE
namespace VCL_NAMESPACE {
#endif

  // Constant for indicating don't care in permute and blend functions.
  // V_DC is -256 in Vector class library version 1.xx
  // V_DC can be any value less than -1 in Vector class library version 2.00
  constexpr int V_DC = -256;

  /*****************************************************************************
   *
   *    Helper functions that depend on instruction set, compiler, or platform
   *
   *****************************************************************************/

  // Define interface to cpuid instruction.
  // input:  functionnumber = leaf (eax), ecxleaf = subleaf (ecx)
  // output: output[0] = eax, output[1] = ebx, output[2] = ecx, output[3] = edx
  static inline void cpuid( int output[4], int functionnumber, int ecxleaf = 0 ) {
#ifdef __x86_64__
#  if defined( __GNUC__ ) || defined( __clang__ ) // use inline assembly, Gnu/AT&T syntax
    int a, b, c, d;
    __asm( "cpuid" : "=a"( a ), "=b"( b ), "=c"( c ), "=d"( d ) : "a"( functionnumber ), "c"( ecxleaf ) : );
    output[0] = a;
    output[1] = b;
    output[2] = c;
    output[3] = d;

#  elif defined( _MSC_VER ) // Microsoft compiler, intrin.h included
    __cpuidex( output, functionnumber, ecxleaf ); // intrinsic function for CPUID

#  else // unknown platform. try inline assembly with masm/intel syntax
    __asm {
      mov eax, functionnumber
      mov ecx, ecxleaf
      cpuid;
      mov esi, output
      mov [esi],      eax
      mov [esi + 4],  ebx
      mov [esi + 8],  ecx
      mov [esi + 12], edx
    }
#  endif // compiler/platform
#endif   // __x86_64__
  }
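
  // Usage sketch: query the basic CPUID leaf. Leaf 0 returns the highest
  // supported standard leaf in eax and the 12-byte vendor string in ebx, edx, ecx:
  //   int abcd[4];
  //   cpuid( abcd, 0 );           // leaf 0, subleaf 0
  //   int highest_leaf = abcd[0]; // feature flags are then in leaf 1, ecx/edx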

// Define popcount function. Gives sum of bits
#if INSTRSET >= 6 // SSE4.2
  // popcnt instruction is not officially part of the SSE4.2 instruction set,
  // but available in all known processors with SSE4.2
  static inline uint32_t vml_popcnt( uint32_t a ) {
    return (uint32_t)_mm_popcnt_u32( a ); // Intel intrinsic. Supported by gcc and clang
  }
#  ifdef __x86_64__
  static inline int64_t vml_popcnt( uint64_t a ) {
    return _mm_popcnt_u64( a ); // Intel intrinsic.
  }
#  else // 32 bit mode
  static inline int64_t vml_popcnt( uint64_t a ) {
    return _mm_popcnt_u32( uint32_t( a >> 32 ) ) + _mm_popcnt_u32( uint32_t( a ) );
  }
#  endif
#else // no SSE4.2
  static inline uint32_t vml_popcnt( uint32_t a ) {
    // popcnt instruction not available. SWAR bit count:
    uint32_t b = a - ( ( a >> 1 ) & 0x55555555 );                  // bit counts in 2-bit fields
    uint32_t c = ( b & 0x33333333 ) + ( ( b >> 2 ) & 0x33333333 ); // bit counts in 4-bit fields
    uint32_t d = ( c + ( c >> 4 ) ) & 0x0F0F0F0F;                  // bit counts in bytes
    uint32_t e = d * 0x01010101;                                   // sum byte counts into the top byte
    return e >> 24;
  }

  static inline int32_t vml_popcnt( uint64_t a ) {
    return vml_popcnt( uint32_t( a >> 32 ) ) + vml_popcnt( uint32_t( a ) );
  }
#endif
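
// Worked example of the SWAR fallback above, for a = 0xF0 (four set bits):
//   b = 0xF0 - ((0xF0 >> 1) & 0x55555555) = 0xF0 - 0x50 = 0xA0
//   c = (0xA0 & 0x33333333) + ((0xA0 >> 2) & 0x33333333) = 0x20 + 0x20 = 0x40
//   d = (0x40 + (0x40 >> 4)) & 0x0F0F0F0F = 0x04
//   e = 0x04 * 0x01010101 = 0x04040404;  e >> 24 = 4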

// Define bit-scan-forward function. Gives index to lowest set bit
#if defined( __GNUC__ ) || defined( __clang__ )
  // gcc and Clang have no bit_scan_forward intrinsic
#  if defined( __clang__ ) // fix clang bug
  // Clang uses a k register as parameter a when inlined from horizontal_find_first
  __attribute__( ( noinline ) )
#  endif
  static uint32_t bit_scan_forward( uint32_t a ) {
    uint32_t r;
    __asm( "bsfl %1, %0" : "=r"( r ) : "r"( a ) : );
    return r;
  }
  static inline uint32_t bit_scan_forward( uint64_t a ) {
    uint32_t lo = uint32_t( a );
    if ( lo ) return bit_scan_forward( lo );
    uint32_t hi = uint32_t( a >> 32 );
    return bit_scan_forward( hi ) + 32;
  }

#else // other compilers
  static inline uint32_t bit_scan_forward( uint32_t a ) {
    unsigned long r;
    _BitScanForward( &r, a ); // defined in intrin.h for MS and Intel compilers
    return r;
  }
#  ifdef __x86_64__
  static inline uint32_t bit_scan_forward( uint64_t a ) {
    unsigned long r;
    _BitScanForward64( &r, a ); // defined in intrin.h for MS and Intel compilers
    return (uint32_t)r;
  }
#  else
  static inline uint32_t bit_scan_forward( uint64_t a ) {
    uint32_t lo = uint32_t( a );
    if ( lo ) return bit_scan_forward( lo );
    uint32_t hi = uint32_t( a >> 32 );
    return bit_scan_forward( hi ) + 32;
  }
#  endif
#endif
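
  // Usage sketch: the result is the zero-based index of the lowest set bit,
  // so bit_scan_forward( 0b10100u ) == 2. The result is undefined for a == 0,
  // because the underlying bsf instruction leaves its destination undefined.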

// Define bit-scan-reverse function. Gives index to highest set bit = floor(log2(a))
#if defined( __GNUC__ ) || defined( __clang__ )
  static inline uint32_t bit_scan_reverse( uint32_t a ) __attribute__( ( pure ) );
  static inline uint32_t bit_scan_reverse( uint32_t a ) {
    uint32_t r;
    __asm( "bsrl %1, %0" : "=r"( r ) : "r"( a ) : );
    return r;
  }
#  ifdef __x86_64__
  static inline uint32_t bit_scan_reverse( uint64_t a ) {
    uint64_t r;
    __asm( "bsrq %1, %0" : "=r"( r ) : "r"( a ) : );
    return r;
  }
#  else // 32 bit mode
  static inline uint32_t bit_scan_reverse( uint64_t a ) {
    uint64_t ahi = a >> 32;
    if ( ahi == 0 )
      return bit_scan_reverse( uint32_t( a ) );
    else
      return bit_scan_reverse( uint32_t( ahi ) ) + 32;
  }
#  endif
#else
  static inline uint32_t bit_scan_reverse( uint32_t a ) {
    unsigned long r;
    _BitScanReverse( &r, a ); // defined in intrin.h for MS and Intel compilers
    return r;
  }
#  ifdef __x86_64__
  static inline uint32_t bit_scan_reverse( uint64_t a ) {
    unsigned long r;
    _BitScanReverse64( &r, a ); // defined in intrin.h for MS and Intel compilers
    return r;
  }
#  else // 32 bit mode
  static inline uint32_t bit_scan_reverse( uint64_t a ) {
    uint64_t ahi = a >> 32;
    if ( ahi == 0 )
      return bit_scan_reverse( uint32_t( a ) );
    else
      return bit_scan_reverse( uint32_t( ahi ) ) + 32;
  }
#  endif
#endif

  // Same function, for compile-time constants
  constexpr int bit_scan_reverse_const( uint64_t const n ) {
    if ( n == 0 ) return -1;
    uint64_t a = n, b = 0, j = 64, k = 0;
    do {
      j >>= 1;              // halve the search window: 32, 16, 8, 4, 2, 1, 0
      k = (uint64_t)1 << j;
      if ( a >= k ) {       // highest set bit is in the upper half:
        a >>= j;            // discard the lower half
        b += j;             // and add the window size to the result
      }
    } while ( j > 0 );
    return int( b );
  }
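
  // Worked example: bit_scan_reverse_const( 0x50 ) = 6, found by binary search:
  //   j = 32, 16, 8: 0x50 < 2^j each time, nothing added
  //   j = 4: 0x50 >= 0x10 -> a = 0x05, b = 4
  //   j = 2: 0x05 >= 0x04 -> a = 0x01, b = 6
  //   j = 1 and j = 0 add nothing -> returns 6 = floor(log2(0x50))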
392
393 /*****************************************************************************
394 *
395 * Common templates
396 *
397 *****************************************************************************/
398
399 // Template class to represent compile-time integer constant
400 template <int32_t n>
401 class Const_int_t {}; // represent compile-time signed integer constant
402 template <uint32_t n>
403 class Const_uint_t {}; // represent compile-time unsigned integer constant
404#define const_int( n ) ( Const_int_t<n>() ) // n must be compile-time integer constant
405#define const_uint( n ) ( Const_uint_t<n>() ) // n must be compile-time unsigned integer constant
406
407 // template for producing quiet NAN
408 template <class VTYPE>
409 static inline VTYPE nan_vec( uint32_t payload = 0x100 ) {
410 if constexpr ( ( VTYPE::elementtype() & 1 ) != 0 ) { // double
411 union {
412 uint64_t q;
413 double f;
414 } ud;
415 // n is left justified to avoid loss of NAN payload when converting to float
416 ud.q = 0x7FF8000000000000 | uint64_t( payload ) << 29;
417 return VTYPE( ud.f );
418 }
419 // float will be converted to double if necessary
420 union {
421 uint32_t i;
422 float f;
423 } uf;
424 uf.i = 0x7FC00000 | ( payload & 0x003FFFFF );
425 return VTYPE( uf.f );
426 }
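
  // Usage sketch (assuming a float vector class such as Vec4f from vectorf128.h):
  //   Vec4f q = nan_vec<Vec4f>( 5 );  // four quiet NaNs carrying payload 5
  // The payload sits in the low mantissa bits of each float; for double
  // vectors it is shifted up by 29 bits so that it survives a later
  // double-to-float conversion.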

  // Test if a parameter is a compile-time constant
  /* Unfortunately, this works only for macro parameters, not for inline function parameters.
     I hope that some solution will appear in the future, but for now it appears to be
     impossible to check if a function parameter is a compile-time constant.
     This would be useful in operator / and in function pow:
     #if defined(__GNUC__) || defined (__clang__)
     #define is_constant(a) __builtin_constant_p(a)
     #else
     #define is_constant(a) false
     #endif
  */

  /*****************************************************************************
   *
   *          Helper functions for permute and blend functions
   *
   ******************************************************************************
  Rules for constexpr functions:

  > All variable declarations must include initialization

  > Do not put variable declarations inside a for-clause, e.g. avoid: for (int i=0; ..
    Instead, you have to declare the loop counter before the for-loop.

  > Do not make constexpr functions that return vector types. This requires type
    punning with a union, which is not allowed in constexpr functions under C++17.
    It may be possible under C++20

  *****************************************************************************/

  // Define type for encapsulated array to use as return type:
  template <typename T, int N>
  struct EList {
    T a[N];
  };

  // get_inttype: get an integer of a size that matches the element size
  // of vector class V, with the value -1
  template <typename V>
  constexpr auto get_inttype() {
    constexpr int elementsize = sizeof( V ) / V::size(); // size of vector elements

    if constexpr ( elementsize >= 8 ) {
      return -int64_t( 1 );
    } else if constexpr ( elementsize >= 4 ) {
      return int32_t( -1 );
    } else if constexpr ( elementsize >= 2 ) {
      return int16_t( -1 );
    } else {
      return int8_t( -1 );
    }
  }

  // zero_mask: return a compact bit mask for zeroing using AVX512 mask.
  // Parameter a is a reference to a constexpr int array of permutation indexes
  template <int N>
  constexpr auto zero_mask( int const ( &a )[N] ) {
    uint64_t mask = 0;
    int      i    = 0;

    for ( i = 0; i < N; i++ ) {
      if ( a[i] >= 0 ) mask |= uint64_t( 1 ) << i;
    }
    if constexpr ( N <= 8 )
      return uint8_t( mask );
    else if constexpr ( N <= 16 )
      return uint16_t( mask );
    else if constexpr ( N <= 32 )
      return uint32_t( mask );
    else
      return mask;
  }
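
  // Example: for indexes { 0, -1, 2, 3 } only elements 0, 2 and 3 are kept,
  // so zero_mask returns uint8_t( 0b1101 ) = 0x0D, usable as an AVX512
  // zeroing mask (__mmask8).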

  // zero_mask_broad: return a broad byte mask for zeroing.
  // Parameter A is a reference to a constexpr int array of permutation indexes
  template <typename V>
  constexpr auto zero_mask_broad( int const ( &A )[V::size()] ) {
    constexpr int N = V::size();                // number of vector elements
    typedef decltype( get_inttype<V>() ) Etype; // element type
    EList<Etype, N> u = { { 0 } };              // list for return
    int             i = 0;
    for ( i = 0; i < N; i++ ) { u.a[i] = A[i] >= 0 ? get_inttype<V>() : 0; }
    return u; // return encapsulated array
  }

  // make_bit_mask: return a compact mask of bits from a list of N indexes:
  // B contains options indicating how to gather the mask
  // bit 0-7 in B indicates which bit in each index to collect
  // bit 8 = 0x100: set 1 in the lower half of the bit mask if the indicated bit is 1.
  // bit 8 = 0:     set 1 in the lower half of the bit mask if the indicated bit is 0.
  // bit 9 = 0x200: set 1 in the upper half of the bit mask if the indicated bit is 1.
  // bit 9 = 0:     set 1 in the upper half of the bit mask if the indicated bit is 0.
  // bit 10 = 0x400: set 1 in the bit mask if the corresponding index is -1 or V_DC
  // Parameter a is a reference to a constexpr int array of permutation indexes
  template <int N, int B>
  constexpr uint64_t make_bit_mask( int const ( &a )[N] ) {
    uint64_t r = 0;                   // return value
    uint8_t  j = uint8_t( B & 0xFF ); // index to selected bit
    uint64_t s = 0;                   // bit number i in r
    uint64_t f = 0;                   // 1 if bit not flipped
    int      i = 0;
    for ( i = 0; i < N; i++ ) {
      int ix = a[i];
      if ( ix < 0 ) { // -1 or V_DC
        s = ( B >> 10 ) & 1;
      } else {
        s = ( (uint32_t)ix >> j ) & 1; // extract selected bit
        if ( i < N / 2 ) {
          f = ( B >> 8 ) & 1; // lower half
        } else {
          f = ( B >> 9 ) & 1; // upper half
        }
        s ^= f ^ 1; // flip bit if needed
      }
      r |= uint64_t( s ) << i; // set bit in return value
    }
    return r;
  }
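
  // Example: make_bit_mask<4, 0x300>( { 0, 1, 2, 3 } ) collects bit 0 of each
  // index (j = 0) without flipping in either half (bits 8 and 9 set), giving
  // the even/odd pattern 0b1010 = 0xA.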

  // make_broad_mask: convert a bit mask m to a broad mask
  // The return value will be a broad boolean mask with elementsize matching vector class V
  template <typename V>
  constexpr auto make_broad_mask( uint64_t const m ) {
    constexpr int N = V::size();                // number of vector elements
    typedef decltype( get_inttype<V>() ) Etype; // element type
    EList<Etype, N> u = { { 0 } };              // list for returning
    int             i = 0;
    for ( i = 0; i < N; i++ ) { u.a[i] = ( ( m >> i ) & 1 ) != 0 ? get_inttype<V>() : 0; }
    return u; // return encapsulated array
  }

  // perm_mask_broad: return a mask for permutation by a vector register index.
  // Parameter A is a reference to a constexpr int array of permutation indexes
  template <typename V>
  constexpr auto perm_mask_broad( int const ( &A )[V::size()] ) {
    constexpr int N = V::size();                // number of vector elements
    typedef decltype( get_inttype<V>() ) Etype; // vector element type
    EList<Etype, N> u = { { 0 } };              // list for returning
    int             i = 0;
    for ( i = 0; i < N; i++ ) { u.a[i] = Etype( A[i] ); }
    return u; // return encapsulated array
  }

  // perm_flags: returns information about how a permute can be implemented.
  // The return value is composed of these flag bits:
  const int perm_zeroing      = 1;          // needs zeroing
  const int perm_perm         = 2;          // permutation needed
  const int perm_allzero      = 4;          // all is zero or don't care
  const int perm_largeblock   = 8;          // fits permute with a larger block size (e.g. permute Vec2q instead of Vec4i)
  const int perm_addz         = 0x10;       // additional zeroing needed after permute with larger block size or shift
  const int perm_addz2        = 0x20;       // additional zeroing needed after perm_zext, perm_compress, or perm_expand
  const int perm_cross_lane   = 0x40;       // permutation crossing 128-bit lanes
  const int perm_same_pattern = 0x80;       // same permute pattern in all 128-bit lanes
  const int perm_punpckh      = 0x100;      // permutation pattern fits punpckh instruction
  const int perm_punpckl      = 0x200;      // permutation pattern fits punpckl instruction
  const int perm_rotate       = 0x400;      // permutation pattern fits rotation within lanes. 4 bit count returned in bit perm_rot_count
  const int perm_shright      = 0x1000;     // permutation pattern fits shift right within lanes. 4 bit count returned in bit perm_rot_count
  const int perm_shleft       = 0x2000;     // permutation pattern fits shift left within lanes. negative count returned in bit perm_rot_count
  const int perm_rotate_big   = 0x4000;     // permutation pattern fits rotation across lanes. 6 bit count returned in bit perm_rot_count
  const int perm_broadcast    = 0x8000;     // permutation pattern fits broadcast of a single element.
  const int perm_zext         = 0x10000;    // permutation pattern fits zero extension
  const int perm_compress     = 0x20000;    // permutation pattern fits vpcompress instruction
  const int perm_expand       = 0x40000;    // permutation pattern fits vpexpand instruction
  const int perm_outofrange   = 0x10000000; // index out of range
  const int perm_rot_count    = 32;         // rotate or shift count is in bits perm_rot_count to perm_rot_count+3
  const int perm_ipattern     = 40;         // pattern for pshufd is in bit perm_ipattern to perm_ipattern + 7 if perm_same_pattern and elementsize >= 4

  template <typename V>
  constexpr uint64_t perm_flags( int const ( &a )[V::size()] ) {
    // a is a reference to a constexpr array of permutation indexes
    // V is a vector class
    constexpr int  N = V::size();                                         // number of elements
    uint64_t       r = perm_largeblock | perm_same_pattern | perm_allzero; // return value
    uint32_t       i = 0;                                                 // loop counter
    int            j = 0;                                                 // loop counter
    int            ix = 0;                                                // index number i
    const uint32_t nlanes      = sizeof( V ) / 16;                        // number of 128-bit lanes
    const uint32_t lanesize    = N / nlanes;                              // elements per lane
    const uint32_t elementsize = sizeof( V ) / N;                         // size of each vector element
    uint32_t       lane        = 0;                                       // current lane
    uint32_t       rot         = 999;                                     // rotate left count
    int32_t        broadc      = 999;                                     // index to broadcasted element
    uint32_t       patfail     = 0;                                       // remember certain patterns that do not fit
    uint32_t       addz2       = 0;                                       // remember certain patterns need extra zeroing
    int32_t        compresslasti = -1;                                    // last index in perm_compress fit
    int32_t        compresslastp = -1;                                    // last position in perm_compress fit
    int32_t        expandlasti   = -1;                                    // last index in perm_expand fit
    int32_t        expandlastp   = -1;                                    // last position in perm_expand fit

    int lanepattern[lanesize] = { 0 }; // pattern in each lane

    for ( i = 0; i < N; i++ ) { // loop through indexes
      ix = a[i];                // current index
      // meaning of ix: -1 = set to zero, V_DC = don't care, non-negative value = permute.
      if ( ix == -1 ) {
        r |= perm_zeroing; // zeroing requested
      } else if ( ix != V_DC && uint32_t( ix ) >= N ) {
        r |= perm_outofrange; // index out of range
      }
      if ( ix >= 0 ) {
        r &= ~perm_allzero;                 // not all zero
        if ( ix != (int)i ) r |= perm_perm; // needs permutation
        if ( broadc == 999 )
          broadc = ix; // remember broadcast index
        else if ( broadc != ix )
          broadc = 1000; // does not fit broadcast
      }
      // check if pattern fits a larger block size:
      // even indexes must be even, odd indexes must fit the preceding even index + 1
      if ( ( i & 1 ) == 0 ) {                               // even index
        if ( ix >= 0 && ( ix & 1 ) ) r &= ~perm_largeblock; // not even. does not fit larger block size
        int iy = a[i + 1];                                  // next odd index
        if ( iy >= 0 && ( iy & 1 ) == 0 ) r &= ~perm_largeblock;         // not odd. does not fit larger block size
        if ( ix >= 0 && iy >= 0 && iy != ix + 1 ) r &= ~perm_largeblock; // does not fit preceding index + 1
        if ( ix == -1 && iy >= 0 ) r |= perm_addz;          // needs additional zeroing at current block size
        if ( iy == -1 && ix >= 0 ) r |= perm_addz;          // needs additional zeroing at current block size
      }
      lane = i / lanesize;   // current lane
      if ( lane == 0 ) {     // first lane, or no pattern yet
        lanepattern[i] = ix; // save pattern
      }
      // check if crossing lanes
      if ( ix >= 0 ) {
        uint32_t lanei = (uint32_t)ix / lanesize;  // source lane
        if ( lanei != lane ) r |= perm_cross_lane; // crossing lane
      }
      // check if same pattern in all lanes
      if ( lane != 0 && ix >= 0 ) {           // not first lane
        int j1 = i - int( lane * lanesize );  // index into lanepattern
        int jx = ix - int( lane * lanesize ); // pattern within lane
        if ( jx < 0 || jx >= (int)lanesize ) r &= ~perm_same_pattern; // source is in another lane
        if ( lanepattern[j1] < 0 ) {
          lanepattern[j1] = jx; // pattern not known from previous lane
        } else {
          if ( lanepattern[j1] != jx ) r &= ~perm_same_pattern; // not same pattern
        }
      }
      if ( ix >= 0 ) {
        // check if pattern fits zero extension (perm_zext)
        if ( uint32_t( ix * 2 ) != i ) {
          patfail |= 1; // does not fit zero extension
        }
        // check if pattern fits compress (perm_compress)
        if ( ix > compresslasti && ix - compresslasti >= (int)i - compresslastp ) {
          if ( (int)i - compresslastp > 1 ) addz2 |= 2; // perm_compress may need additional zeroing
          compresslasti = ix;
          compresslastp = i;
        } else {
          patfail |= 2; // does not fit perm_compress
        }
        // check if pattern fits expand (perm_expand)
        if ( ix > expandlasti && ix - expandlasti <= (int)i - expandlastp ) {
          if ( ix - expandlasti > 1 ) addz2 |= 4; // perm_expand may need additional zeroing
          expandlasti = ix;
          expandlastp = i;
        } else {
          patfail |= 4; // does not fit perm_expand
        }
      } else if ( ix == -1 ) {
        if ( ( i & 1 ) == 0 ) addz2 |= 1; // zero extension needs additional zeroing
      }
    }
    if ( !( r & perm_perm ) ) return r; // more checks are superfluous

    if ( !( r & perm_largeblock ) ) r &= ~perm_addz;    // remove irrelevant flag
    if ( r & perm_cross_lane ) r &= ~perm_same_pattern; // remove irrelevant flag
    if ( ( patfail & 1 ) == 0 ) {
      r |= perm_zext; // fits zero extension
      if ( ( addz2 & 1 ) != 0 ) r |= perm_addz2;
    } else if ( ( patfail & 2 ) == 0 ) {
      r |= perm_compress;         // fits compression
      if ( ( addz2 & 2 ) != 0 ) { // check if additional zeroing needed
        for ( j = 0; j < compresslastp; j++ ) {
          if ( a[j] == -1 ) r |= perm_addz2;
        }
      }
    } else if ( ( patfail & 4 ) == 0 ) {
      r |= perm_expand;           // fits expansion
      if ( ( addz2 & 4 ) != 0 ) { // check if additional zeroing needed
        for ( j = 0; j < expandlastp; j++ ) {
          if ( a[j] == -1 ) r |= perm_addz2;
        }
      }
    }

    if ( r & perm_same_pattern ) {
      // same pattern in all lanes. check if it fits specific patterns
      bool fit = true;
      // fit shift or rotate
      for ( i = 0; i < lanesize; i++ ) {
        if ( lanepattern[i] >= 0 ) {
          uint32_t rot1 = uint32_t( lanepattern[i] + lanesize - i ) % lanesize;
          if ( rot == 999 ) {
            rot = rot1;
          } else { // check if fit
            if ( rot != rot1 ) fit = false;
          }
        }
      }
      rot &= lanesize - 1; // prevent out of range values
      if ( fit ) {         // fits rotate, and possibly shift
        uint64_t rot2 = ( rot * elementsize ) & 0xF; // rotate right count in bytes
        r |= rot2 << perm_rot_count; // put shift/rotate count in output bits perm_rot_count to perm_rot_count+3
#if INSTRSET >= 4 // SSSE3
        r |= perm_rotate; // allow palignr
#endif
        // fit shift left
        fit = true;
        for ( i = 0; i < lanesize - rot; i++ ) { // check if first rot elements are zero or don't care
          if ( lanepattern[i] >= 0 ) fit = false;
        }
        if ( fit ) {
          r |= perm_shleft;
          for ( ; i < lanesize; i++ )
            if ( lanepattern[i] == -1 ) r |= perm_addz; // additional zeroing needed
        }
        // fit shift right
        fit = true;
        for ( i = lanesize - (uint32_t)rot; i < lanesize;
              i++ ) { // check if last (lanesize-rot) elements are zero or don't care
          if ( lanepattern[i] >= 0 ) fit = false;
        }
        if ( fit ) {
          r |= perm_shright;
          for ( i = 0; i < lanesize - rot; i++ ) {
            if ( lanepattern[i] == -1 ) r |= perm_addz; // additional zeroing needed
          }
        }
      }
      // fit punpckhi
      fit = true;
      uint32_t j2 = lanesize / 2;
      for ( i = 0; i < lanesize; i++ ) {
        if ( lanepattern[i] >= 0 && lanepattern[i] != (int)j2 ) fit = false;
        if ( ( i & 1 ) != 0 ) j2++;
      }
      if ( fit ) r |= perm_punpckh;
      // fit punpcklo
      fit = true;
      j2  = 0;
      for ( i = 0; i < lanesize; i++ ) {
        if ( lanepattern[i] >= 0 && lanepattern[i] != (int)j2 ) fit = false;
        if ( ( i & 1 ) != 0 ) j2++;
      }
      if ( fit ) r |= perm_punpckl;
      // fit pshufd
      if ( elementsize >= 4 ) {
        uint64_t p = 0;
        for ( i = 0; i < lanesize; i++ ) {
          if ( lanesize == 4 ) {
            p |= ( lanepattern[i] & 3 ) << 2 * i;
          } else { // lanesize = 2
            p |= ( ( lanepattern[i] & 1 ) * 10 + 4 ) << 4 * i;
          }
        }
        r |= p << perm_ipattern;
      }
    }
#if INSTRSET >= 7
    else { // not same pattern in all lanes
      if constexpr ( nlanes > 1 ) { // Try if it fits big rotate
        for ( i = 0; i < N; i++ ) {
          ix = a[i];
          if ( ix >= 0 ) {
            uint32_t rot2 = ( ix + N - i ) % N; // rotate count
            if ( rot == 999 ) {
              rot = rot2; // save rotate count
            } else if ( rot != rot2 ) {
              rot = 1000;
              break; // does not fit big rotate
            }
          }
        }
        if ( rot < N ) { // fits big rotate
          r |= perm_rotate_big | (uint64_t)rot << perm_rot_count;
        }
      }
    }
#endif
    if ( broadc < 999 && ( r & ( perm_rotate | perm_shright | perm_shleft | perm_rotate_big ) ) == 0 ) {
      r |= perm_broadcast | (uint64_t)broadc << perm_rot_count; // fits broadcast
    }
    return r;
  }
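
  // Usage sketch: the permute implementations dispatch on these flags at
  // compile time, e.g. (indexes chosen here only for illustration):
  //   constexpr int      ind[4] = { 1, 0, 3, 2 };
  //   constexpr uint64_t f      = perm_flags<Vec4i>( ind );
  //   if constexpr ( f & perm_same_pattern ) { /* in-lane pattern, e.g. pshufd */ }
  //   else if constexpr ( f & perm_cross_lane ) { /* lane-crossing permute */ }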

  // compress_mask: returns a bit mask to use for compression instruction.
  // It is presupposed that perm_flags indicates perm_compress.
  // Additional zeroing is needed if perm_flags indicates perm_addz2
  template <int N>
  constexpr uint64_t compress_mask( int const ( &a )[N] ) {
    // a is a reference to a constexpr array of permutation indexes
    int      ix = 0, lasti = -1, lastp = -1;
    uint64_t m = 0;
    int      i = 0;
    int      j = 1; // loop counters
    for ( i = 0; i < N; i++ ) {
      ix = a[i]; // permutation index
      if ( ix >= 0 ) {
        m |= (uint64_t)1 << ix; // mask for compression source
        for ( j = 1; j < i - lastp; j++ ) {
          m |= (uint64_t)1 << ( lasti + j ); // dummy filling source
        }
        lastp = i;
        lasti = ix;
      }
    }
    return m;
  }

  // expand_mask: returns a bit mask to use for expansion instruction.
  // It is presupposed that perm_flags indicates perm_expand.
  // Additional zeroing is needed if perm_flags indicates perm_addz2
  template <int N>
  constexpr uint64_t expand_mask( int const ( &a )[N] ) {
    // a is a reference to a constexpr array of permutation indexes
    int      ix = 0, lasti = -1, lastp = -1;
    uint64_t m = 0;
    int      i = 0;
    int      j = 1;
    for ( i = 0; i < N; i++ ) {
      ix = a[i]; // permutation index
      if ( ix >= 0 ) {
        m |= (uint64_t)1 << i; // mask for expansion destination
        for ( j = 1; j < ix - lasti; j++ ) {
          m |= (uint64_t)1 << ( lastp + j ); // dummy filling destination
        }
        lastp = i;
        lasti = ix;
      }
    }
    return m;
  }
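
  // Example: compress_mask( { 1, 3, -1, -1 } ) = 0b1010: vpcompress with this
  // source mask packs elements 1 and 3 into positions 0 and 1.
  // Conversely, expand_mask( { -1, 0, -1, 1 } ) = 0b1010: vpexpand with this
  // destination mask spreads source elements 0 and 1 into positions 1 and 3.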

  // perm16_flags: returns information about how to permute a vector of 16-bit integers
  // Note: It is presupposed that perm_flags reports perm_same_pattern
  // The return value is composed of these bits:
  // 1: data from low  64 bits to low  64 bits. pattern in bit 32-39
  // 2: data from high 64 bits to high 64 bits. pattern in bit 40-47
  // 4: data from high 64 bits to low  64 bits. pattern in bit 48-55
  // 8: data from low  64 bits to high 64 bits. pattern in bit 56-63
  template <typename V>
  constexpr uint64_t perm16_flags( int const ( &a )[V::size()] ) {
    // a is a reference to a constexpr array of permutation indexes
    // V is a vector class
    constexpr int N = V::size(); // number of elements

    uint64_t       retval   = 0;              // return value
    uint32_t       pat[4]   = { 0, 0, 0, 0 }; // permute patterns
    uint32_t       i        = 0;              // loop counter
    int            ix       = 0;              // index number i
    const uint32_t lanesize = 8;              // elements per lane
    uint32_t       lane     = 0;              // current lane
    int lanepattern[lanesize] = { 0 };        // pattern in each lane

    for ( i = 0; i < N; i++ ) {
      ix   = a[i];
      lane = i / lanesize; // current lane
      if ( lane == 0 ) {
        lanepattern[i] = ix;    // save pattern
      } else if ( ix >= 0 ) {   // not first lane
        uint32_t j  = i - lane * lanesize;  // index into lanepattern
        int      jx = ix - lane * lanesize; // pattern within lane
        if ( lanepattern[j] < 0 ) {
          lanepattern[j] = jx; // pattern not known from previous lane
        }
      }
    }
    // four patterns: low2low, high2high, high2low, low2high
    for ( i = 0; i < 4; i++ ) {
      // loop through low pattern
      if ( lanepattern[i] >= 0 ) {
        if ( lanepattern[i] < 4 ) { // low2low
          retval |= 1;
          pat[0] |= uint32_t( lanepattern[i] & 3 ) << ( 2 * i );
        } else { // high2low
          retval |= 4;
          pat[2] |= uint32_t( lanepattern[i] & 3 ) << ( 2 * i );
        }
      }
      // loop through high pattern
      if ( lanepattern[i + 4] >= 0 ) {
        if ( lanepattern[i + 4] < 4 ) { // low2high
          retval |= 8;
          pat[3] |= uint32_t( lanepattern[i + 4] & 3 ) << ( 2 * i );
        } else { // high2high
          retval |= 2;
          pat[1] |= uint32_t( lanepattern[i + 4] & 3 ) << ( 2 * i );
        }
      }
    }
    // join return data
    for ( i = 0; i < 4; i++ ) { retval |= (uint64_t)pat[i] << ( 32 + i * 8 ); }
    return retval;
  }

  // pshufb_mask: return a broad byte mask for permutation within lanes
  // for use with the pshufb instruction (_mm..._shuffle_epi8).
  // The pshufb instruction provides fast permutation and zeroing,
  // allowing different patterns in each lane but no crossing of lane boundaries
  template <typename V, int oppos = 0>
  constexpr auto pshufb_mask( int const ( &A )[V::size()] ) {
    // Parameter A is a reference to a constexpr array of permutation indexes
    // V is a vector class
    // oppos = 1 for data from the opposite 128-bit lane in 256-bit vectors
    constexpr uint32_t N                 = V::size();       // number of vector elements
    constexpr uint32_t elementsize       = sizeof( V ) / N; // size of each vector element
    constexpr uint32_t nlanes            = sizeof( V ) / 16; // number of 128 bit lanes in vector
    constexpr uint32_t elements_per_lane = N / nlanes;      // number of vector elements per lane

    EList<int8_t, sizeof( V )> u = { { 0 } }; // list for returning

    uint32_t i = 0; // loop counters
    uint32_t j = 0;
    int      m = 0;
    int      k = 0;
    uint32_t lane = 0;

    for ( lane = 0; lane < nlanes; lane++ ) {     // loop through lanes
      for ( i = 0; i < elements_per_lane; i++ ) { // loop through elements in lane
        // permutation index for element within lane
        int8_t p  = -1;
        int    ix = A[m];
        if ( ix >= 0 ) {
          ix ^= oppos * elements_per_lane; // flip bit if opposite lane
        }
        ix -= int( lane * elements_per_lane );          // index relative to lane
        if ( ix >= 0 && ix < (int)elements_per_lane ) { // index points to desired lane
          p = ix * elementsize;
        }
        for ( j = 0; j < elementsize; j++ ) { // loop through bytes in element
          u.a[k++] = p < 0 ? -1 : p + j;      // store byte permutation index
        }
        m++;
      }
    }
    return u; // return encapsulated array
  }
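
  // Example: for a 4 x 32-bit vector, indexes { 1, 0, -1, 2 } expand to the
  // byte mask { 4,5,6,7, 0,1,2,3, -1,-1,-1,-1, 8,9,10,11 }; pshufb zeroes
  // each destination byte whose mask byte is negative (bit 7 set).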

  // largeblock_perm: return indexes for replacing a permute or blend with
  // a certain block size by a permute or blend with the double block size.
  // Note: it is presupposed that perm_flags() indicates perm_largeblock
  // It is required that additional zeroing is added if perm_flags() indicates perm_addz
  template <int N>
  constexpr EList<int, N / 2> largeblock_perm( int const ( &a )[N] ) {
    // Parameter a is a reference to a constexpr array of permutation indexes
    EList<int, N / 2> list = { { 0 } }; // result indexes
    int  ix       = 0;     // even index
    int  iy       = 0;     // odd index
    int  iz       = 0;     // combined index
    bool fit_addz = false; // additional zeroing needed at the lower block level
    int  i        = 0;     // loop counter

    // check if additional zeroing is needed at current block size
    for ( i = 0; i < N; i += 2 ) {
      ix = a[i];     // even index
      iy = a[i + 1]; // odd index
      if ( ( ix == -1 && iy >= 0 ) || ( iy == -1 && ix >= 0 ) ) { fit_addz = true; }
    }

    // loop through indexes
    for ( i = 0; i < N; i += 2 ) {
      ix = a[i];     // even index
      iy = a[i + 1]; // odd index
      if ( ix >= 0 ) {
        iz = ix / 2; // half index
      } else if ( iy >= 0 ) {
        iz = iy / 2;
      } else {
        iz = ix | iy;              // -1 or V_DC. -1 takes precedence
        if ( fit_addz ) iz = V_DC; // V_DC, because result will be zeroed later
      }
      list.a[i / 2] = iz; // save to list
    }
    return list;
  }
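
  // Example: the 4-element permute { 2, 3, 0, 1 } moves aligned even/odd pairs,
  // so largeblock_perm returns { 1, 0 }: the same permute done on elements of
  // twice the size (e.g. Vec4i indexes reduced to Vec2q indexes).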

  // blend_flags: returns information about how a blend function can be implemented
  // The return value is composed of these flag bits:
  const int blend_zeroing      = 1;          // needs zeroing
  const int blend_allzero      = 2;          // all is zero or don't care
  const int blend_largeblock   = 4;          // fits blend with a larger block size (e.g. permute Vec2q instead of Vec4i)
  const int blend_addz         = 8;          // additional zeroing needed after blend with larger block size or shift
  const int blend_a            = 0x10;       // has data from a
  const int blend_b            = 0x20;       // has data from b
  const int blend_perma        = 0x40;       // permutation of a needed
  const int blend_permb        = 0x80;       // permutation of b needed
  const int blend_cross_lane   = 0x100;      // permutation crossing 128-bit lanes
  const int blend_same_pattern = 0x200;      // same permute/blend pattern in all 128-bit lanes
  const int blend_punpckhab    = 0x1000;     // pattern fits punpckh(a,b)
  const int blend_punpckhba    = 0x2000;     // pattern fits punpckh(b,a)
  const int blend_punpcklab    = 0x4000;     // pattern fits punpckl(a,b)
  const int blend_punpcklba    = 0x8000;     // pattern fits punpckl(b,a)
  const int blend_rotateab     = 0x10000;    // pattern fits palignr(a,b)
  const int blend_rotateba     = 0x20000;    // pattern fits palignr(b,a)
  const int blend_shufab       = 0x40000;    // pattern fits shufps/shufpd(a,b)
  const int blend_shufba       = 0x80000;    // pattern fits shufps/shufpd(b,a)
  const int blend_rotate_big   = 0x100000;   // pattern fits rotation across lanes. count returned in bits blend_rotpattern
  const int blend_outofrange   = 0x10000000; // index out of range
  const int blend_shufpattern  = 32;         // pattern for shufps/shufpd is in bits blend_shufpattern to blend_shufpattern + 7
  const int blend_rotpattern   = 40;         // pattern for palignr is in bits blend_rotpattern to blend_rotpattern + 7

  template <typename V>
  constexpr uint64_t blend_flags( int const ( &a )[V::size()] ) {
    // a is a reference to a constexpr array of permutation indexes
    // V is a vector class
    constexpr int  N  = V::size(); // number of elements
    uint64_t       r  = blend_largeblock | blend_same_pattern | blend_allzero; // return value
    uint32_t       iu = 0;         // loop counter
    int32_t        ii = 0;         // loop counter
    int            ix = 0;         // index number i
    const uint32_t nlanes   = sizeof( V ) / 16; // number of 128-bit lanes
    const uint32_t lanesize = N / nlanes;       // elements per lane
    uint32_t       lane     = 0;                // current lane
    uint32_t       rot      = 999;              // rotate left count
    int            lanepattern[lanesize] = { 0 }; // pattern in each lane
    if ( lanesize == 2 && N <= 8 ) {
      r |= blend_shufab | blend_shufba; // check if it fits shufpd
    }

    for ( ii = 0; ii < N; ii++ ) { // loop through indexes
      ix = a[ii];                  // index
      if ( ix < 0 ) {
        if ( ix == -1 )
          r |= blend_zeroing; // set to zero
        else if ( ix != V_DC ) {
          r = blend_outofrange;
          break; // illegal index
        }
      } else { // ix >= 0
        r &= ~blend_allzero;
        if ( ix < N ) {
          r |= blend_a;                     // data from a
          if ( ix != ii ) r |= blend_perma; // permutation of a
        } else if ( ix < 2 * N ) {
          r |= blend_b;                         // data from b
          if ( ix != ii + N ) r |= blend_permb; // permutation of b
        } else {
          r = blend_outofrange;
          break; // illegal index
        }
      }
      // check if pattern fits a larger block size:
      // even indexes must be even, odd indexes must fit the preceding even index + 1
      if ( ( ii & 1 ) == 0 ) {                               // even index
        if ( ix >= 0 && ( ix & 1 ) ) r &= ~blend_largeblock; // not even. does not fit larger block size
        int iy = a[ii + 1];                                  // next odd index
        if ( iy >= 0 && ( iy & 1 ) == 0 ) r &= ~blend_largeblock;         // not odd. does not fit larger block size
        if ( ix >= 0 && iy >= 0 && iy != ix + 1 ) r &= ~blend_largeblock; // does not fit preceding index + 1
        if ( ix == -1 && iy >= 0 ) r |= blend_addz; // needs additional zeroing at current block size
        if ( iy == -1 && ix >= 0 ) r |= blend_addz; // needs additional zeroing at current block size
      }
      lane = (uint32_t)ii / lanesize; // current lane
      if ( lane == 0 ) {              // first lane, or no pattern yet
        lanepattern[ii] = ix;         // save pattern
      }
      // check if crossing lanes
      if ( ix >= 0 ) {
        uint32_t lanei = uint32_t( ix & ~N ) / lanesize; // source lane
        if ( lanei != lane ) {
          r |= blend_cross_lane; // crossing lane
        }
        if ( lanesize == 2 ) { // check if it fits shufpd
          if ( lanei != lane ) r &= ~( blend_shufab | blend_shufba );
          if ( ( ( ( ix & N ) != 0 ) ^ ii ) & 1 )
            r &= ~blend_shufab;
          else
            r &= ~blend_shufba;
        }
      }
      // check if same pattern in all lanes
      if ( lane != 0 && ix >= 0 ) {           // not first lane
        int j  = ii - int( lane * lanesize ); // index into lanepattern
        int jx = ix - int( lane * lanesize ); // pattern within lane
        if ( jx < 0 || ( jx & ~N ) >= (int)lanesize ) r &= ~blend_same_pattern; // source is in another lane
        if ( lanepattern[j] < 0 ) {
          lanepattern[j] = jx; // pattern not known from previous lane
        } else {
          if ( lanepattern[j] != jx ) r &= ~blend_same_pattern; // not same pattern
        }
      }
    }
    if ( !( r & blend_largeblock ) ) r &= ~blend_addz;    // remove irrelevant flag
    if ( r & blend_cross_lane ) r &= ~blend_same_pattern; // remove irrelevant flag
    if ( !( r & ( blend_perma | blend_permb ) ) ) {
      return r; // no permutation. more checks are superfluous
    }
    if ( r & blend_same_pattern ) {
      // same pattern in all lanes. check if it fits unpack patterns
      r |= blend_punpckhab | blend_punpckhba | blend_punpcklab | blend_punpcklba;
      for ( iu = 0; iu < lanesize; iu++ ) { // loop through lanepattern
        ix = lanepattern[iu];
        if ( ix >= 0 ) {
          if ( (uint32_t)ix != iu / 2 + ( iu & 1 ) * N ) r &= ~blend_punpcklab;
          if ( (uint32_t)ix != iu / 2 + ( ( iu & 1 ) ^ 1 ) * N ) r &= ~blend_punpcklba;
          if ( (uint32_t)ix != ( iu + lanesize ) / 2 + ( iu & 1 ) * N ) r &= ~blend_punpckhab;
          if ( (uint32_t)ix != ( iu + lanesize ) / 2 + ( ( iu & 1 ) ^ 1 ) * N ) r &= ~blend_punpckhba;
        }
      }
#if INSTRSET >= 4 // SSSE3. check if it fits palignr
      for ( iu = 0; iu < lanesize; iu++ ) {
        ix = lanepattern[iu];
        if ( ix >= 0 ) {
          uint32_t t = ix & ~N;
          if ( ix & N ) t += lanesize;
          uint32_t tb = ( t + 2 * lanesize - iu ) % ( lanesize * 2 );
          if ( rot == 999 ) {
            rot = tb;
          } else { // check if fit
            if ( rot != tb ) rot = 1000;
          }
        }
      }
      if ( rot < 999 ) { // fits palignr
        if ( rot < lanesize ) {
          r |= blend_rotateba;
        } else {
          r |= blend_rotateab;
        }
        const uint32_t elementsize = sizeof( V ) / N;
        r |= uint64_t( ( rot & ( lanesize - 1 ) ) * elementsize ) << blend_rotpattern;
      }
#endif
      if ( lanesize == 4 ) {
        // check if it fits shufps
        r |= blend_shufab | blend_shufba;
        for ( ii = 0; ii < 2; ii++ ) {
          ix = lanepattern[ii];
          if ( ix >= 0 ) {
            if ( ix & N )
              r &= ~blend_shufab;
            else
              r &= ~blend_shufba;
          }
        }
        for ( ; ii < 4; ii++ ) {
          ix = lanepattern[ii];
          if ( ix >= 0 ) {
            if ( ix & N )
              r &= ~blend_shufba;
            else
              r &= ~blend_shufab;
          }
        }
        if ( r & ( blend_shufab | blend_shufba ) ) { // fits shufps/shufpd
          uint8_t shufpattern = 0;                   // get pattern
          for ( iu = 0; iu < lanesize; iu++ ) { shufpattern |= ( lanepattern[iu] & 3 ) << iu * 2; }
          r |= (uint64_t)shufpattern << blend_shufpattern; // return pattern
        }
      }
    } else if ( nlanes > 1 ) { // not same pattern in all lanes
      rot = 999;               // check if it fits big rotate
      for ( ii = 0; ii < N; ii++ ) {
        ix = a[ii];
        if ( ix >= 0 ) {
          uint32_t rot2 = ( ix + 2 * N - ii ) % ( 2 * N ); // rotate count
          if ( rot == 999 ) {
            rot = rot2; // save rotate count
          } else if ( rot != rot2 ) {
            rot = 1000;
            break; // does not fit big rotate
          }
        }
      }
      if ( rot < 2 * N ) { // fits big rotate
        r |= blend_rotate_big | (uint64_t)rot << blend_rotpattern;
      }
    }
    if ( lanesize == 2 && ( r & ( blend_shufab | blend_shufba ) ) ) { // fits shufpd. Get pattern
      for ( ii = 0; ii < N; ii++ ) { r |= uint64_t( a[ii] & 1 ) << ( blend_shufpattern + ii ); }
    }
    return r;
  }
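
  // Usage sketch, analogous to perm_flags: indexes 0..N-1 select from a,
  // N..2N-1 select from b (indexes chosen here only for illustration):
  //   constexpr int      ind[4] = { 0, 4, 1, 5 };   // interleave low halves
  //   constexpr uint64_t f      = blend_flags<Vec4i>( ind );
  //   // f reports blend_a | blend_b, and the pattern fits punpckl(a,b)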

  // blend_perm_indexes: return an index list for implementing a blend function as
  // two permutations. N = vector size.
  // dozero = 0: let unused elements be don't care. The two permutation results must be blended
  // dozero = 1: zero unused elements in each permutation. The two permutation results can be OR'ed
  // dozero = 2: indexes that are -1 or V_DC are preserved
  template <int N, int dozero>
  constexpr EList<int, 2 * N> blend_perm_indexes( int const ( &a )[N] ) {
    // a is a reference to a constexpr array of permutation indexes
    EList<int, 2 * N> list = { { 0 } };          // list to return
    int               u    = dozero ? -1 : V_DC; // value to use for unused entries
    int               j    = 0;

    for ( j = 0; j < N; j++ ) { // loop through indexes
      int ix = a[j];            // current index
      if ( ix < 0 ) {           // zero or don't care
        if ( dozero == 2 ) {
          // list.a[j] = list.a[j + N] = ix; // fails in gcc in complicated cases
          list.a[j]     = ix;
          list.a[j + N] = ix;
        } else {
          // list.a[j] = list.a[j + N] = u;
          list.a[j]     = u;
          list.a[j + N] = u;
        }
      } else if ( ix < N ) { // value from a
        list.a[j]     = ix;
        list.a[j + N] = u;
      } else {
        list.a[j]     = u; // value from b
        list.a[j + N] = ix - N;
      }
    }
    return list;
  }
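
  // Example: blend_perm_indexes<4, 1>( { 0, 5, 1, 7 } ) yields
  //   { 0, -1, 1, -1 }  (permute of a)  and  { -1, 1, -1, 3 }  (permute of b);
  // with dozero = 1 the unused elements are zeroed, so the two permutation
  // results can simply be OR'ed to form the blend.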

  // largeblock_indexes: return indexes for replacing a permute or blend with a
  // certain block size by a permute or blend with the double block size.
  // Note: it is presupposed that perm_flags or blend_flags indicates _largeblock
  // It is required that additional zeroing is added if perm_flags or blend_flags
  // indicates _addz
  template <int N>
  constexpr EList<int, N / 2> largeblock_indexes( int const ( &a )[N] ) {
    // Parameter a is a reference to a constexpr array of N permutation indexes
    EList<int, N / 2> list = { { 0 } }; // list to return

    bool fit_addz = false; // additional zeroing needed at the lower block level
    int  ix       = 0;     // even index
    int  iy       = 0;     // odd index
    int  iz       = 0;     // combined index
    int  i        = 0;     // loop counter

    for ( i = 0; i < N; i += 2 ) {
      ix = a[i];     // even index
      iy = a[i + 1]; // odd index
      if ( ix >= 0 ) {
        iz = ix / 2; // half index
      } else if ( iy >= 0 ) {
        iz = iy / 2; // half index
      } else
        iz = ix | iy;     // -1 or V_DC. -1 takes precedence
      list.a[i / 2] = iz; // save to list
      // check if additional zeroing is needed at current block size
      if ( ( ix == -1 && iy >= 0 ) || ( iy == -1 && ix >= 0 ) ) { fit_addz = true; }
    }
    // replace -1 by V_DC if fit_addz
    if ( fit_addz ) {
      for ( i = 0; i < N / 2; i++ ) {
        if ( list.a[i] < 0 ) list.a[i] = V_DC;
      }
    }
    return list;
  }

  /****************************************************************************************
   *
   *          Vector blend helper function templates
   *
   * These templates are for emulating a blend with a vector size that is not supported by
   * the instruction set, using multiple blends or permutations of half the vector size
   *
   ****************************************************************************************/

  // Make dummy blend function templates to avoid error messages when the blend functions are not yet defined
  template <typename dummy>
  void blend2() {}
  template <typename dummy>
  void blend4() {}
  template <typename dummy>
  void blend8() {}
  template <typename dummy>
  void blend16() {}
  template <typename dummy>
  void blend32() {}

  // blend_half_indexes: return an index list for emulating a blend function as
  // blends or permutations from multiple sources
  // dozero = 0: let unused elements be don't care. Multiple permutation results must be blended
  // dozero = 1: zero unused elements in each permutation. Multiple permutation results can be OR'ed
  // dozero = 2: indexes that are -1 or V_DC are preserved
  // src1, src2: sources to blend in a partial implementation
  template <int N, int dozero, int src1, int src2>
  constexpr EList<int, N> blend_half_indexes( int const ( &a )[N] ) {
    // a is a reference to a constexpr array of permutation indexes
    EList<int, N> list = { { 0 } };          // list to return
    int           u    = dozero ? -1 : V_DC; // value to use for unused entries
    int           j    = 0;                  // loop counter

    for ( j = 0; j < N; j++ ) { // loop through indexes
      int ix = a[j];            // current index
      if ( ix < 0 ) {           // zero or don't care
        list.a[j] = ( dozero == 2 ) ? ix : u;
      } else {
        int src = ix / N; // source
        if ( src == src1 ) {
          list.a[j] = ix & ( N - 1 );
        } else if ( src == src2 ) {
          list.a[j] = ( ix & ( N - 1 ) ) + N;
        } else
          list.a[j] = u;
      }
    }
    return list;
  }

  // selectblend: select one of four sources for blending
  template <typename W, int s>
  static inline auto selectblend( W const a, W const b ) {
    if constexpr ( s == 0 )
      return a.get_low();
    else if constexpr ( s == 1 )
      return a.get_high();
    else if constexpr ( s == 2 )
      return b.get_low();
    else
      return b.get_high();
  }

  // blend_half: emulate a blend with a vector size that is not supported
  // by multiple blends with half the vector size.
  // blend_half is called twice, to give the low and high half of the result
  // Parameters: W: type of full-size vector
  // i0...: indexes for low or high half
  // a, b: full size input vectors
  // return value: half-size vector for lower or upper part
  template <typename W, int... i0>
  auto blend_half( W const& a, W const& b ) {
    typedef decltype( a.get_low() ) V; // type for half-size vector
    constexpr int N = V::size();       // size of half-size vector
    static_assert( sizeof...( i0 ) == N, "wrong number of indexes in blend_half" );
    constexpr int ind[N] = { i0... }; // array of indexes

    // lambda to find which of the four possible sources are used
    // return: EList<int, 5> containing a list of up to 4 sources. The last element is the number of sources used
    auto listsources = []( int const n, int const( &ind )[N] ) constexpr {
      bool source_used[4] = { false, false, false, false }; // list of sources used
      int  i              = 0;
      for ( i = 0; i < n; i++ ) {
        int ix = ind[i]; // index
        if ( ix >= 0 ) {
          int src              = ix / n; // source used
          source_used[src & 3] = true;
        }
      }
      // return a list of sources used. The last element is the number of sources used
      EList<int, 5> sources = { { 0 } };
      int           nsrc    = 0; // number of sources
      for ( i = 0; i < 4; i++ ) {
        if ( source_used[i] ) { sources.a[nsrc++] = i; }
      }
      sources.a[4] = nsrc;
      return sources;
    };
    // list of sources used
    constexpr EList<int, 5> sources = listsources( N, ind );
    constexpr int           nsrc    = sources.a[4]; // number of sources used

    if constexpr ( nsrc == 0 ) { // no sources
      return V( 0 );
    }
    // get indexes for the first one or two sources
    constexpr int uindex = ( nsrc > 2 ) ? 1 : 2; // unused elements set to zero if two blends are combined
    constexpr EList<int, N> L = blend_half_indexes<N, uindex, sources.a[0], sources.a[1]>( ind );
    V x0;
    V src0 = selectblend<W, sources.a[0]>( a, b ); // first source
    V src1 = selectblend<W, sources.a[1]>( a, b ); // second source
    if constexpr ( N == 2 ) {
      x0 = blend2<L.a[0], L.a[1]>( src0, src1 );
    } else if constexpr ( N == 4 ) {
      x0 = blend4<L.a[0], L.a[1], L.a[2], L.a[3]>( src0, src1 );
    } else if constexpr ( N == 8 ) {
      x0 = blend8<L.a[0], L.a[1], L.a[2], L.a[3], L.a[4], L.a[5], L.a[6], L.a[7]>( src0, src1 );
    } else if constexpr ( N == 16 ) {
      x0 = blend16<L.a[0], L.a[1], L.a[2], L.a[3], L.a[4], L.a[5], L.a[6], L.a[7], L.a[8], L.a[9], L.a[10], L.a[11],
                   L.a[12], L.a[13], L.a[14], L.a[15]>( src0, src1 );
    } else if constexpr ( N == 32 ) {
      x0 = blend32<L.a[0], L.a[1], L.a[2], L.a[3], L.a[4], L.a[5], L.a[6], L.a[7], L.a[8], L.a[9], L.a[10], L.a[11],
                   L.a[12], L.a[13], L.a[14], L.a[15], L.a[16], L.a[17], L.a[18], L.a[19], L.a[20], L.a[21], L.a[22],
                   L.a[23], L.a[24], L.a[25], L.a[26], L.a[27], L.a[28], L.a[29], L.a[30], L.a[31]>( src0, src1 );
    }
    if constexpr ( nsrc > 2 ) { // get last one or two sources
      constexpr EList<int, N> M = blend_half_indexes<N, 1, sources.a[2], sources.a[3]>( ind );
      V x1;
      V src2 = selectblend<W, sources.a[2]>( a, b ); // third source
      V src3 = selectblend<W, sources.a[3]>( a, b ); // fourth source
      if constexpr ( N == 2 ) {
        x1 = blend2<M.a[0], M.a[1]>( src2, src3 );
      } else if constexpr ( N == 4 ) {
        x1 = blend4<M.a[0], M.a[1], M.a[2], M.a[3]>( src2, src3 );
      } else if constexpr ( N == 8 ) {
        x1 = blend8<M.a[0], M.a[1], M.a[2], M.a[3], M.a[4], M.a[5], M.a[6], M.a[7]>( src2, src3 );
      } else if constexpr ( N == 16 ) {
        x1 = blend16<M.a[0], M.a[1], M.a[2], M.a[3], M.a[4], M.a[5], M.a[6], M.a[7], M.a[8], M.a[9], M.a[10], M.a[11],
                     M.a[12], M.a[13], M.a[14], M.a[15]>( src2, src3 );
      } else if constexpr ( N == 32 ) {
        x1 = blend32<M.a[0], M.a[1], M.a[2], M.a[3], M.a[4], M.a[5], M.a[6], M.a[7], M.a[8], M.a[9], M.a[10], M.a[11],
                     M.a[12], M.a[13], M.a[14], M.a[15], M.a[16], M.a[17], M.a[18], M.a[19], M.a[20], M.a[21], M.a[22],
                     M.a[23], M.a[24], M.a[25], M.a[26], M.a[27], M.a[28], M.a[29], M.a[30], M.a[31]>( src2, src3 );
      }
      x0 |= x1; // combine result of two blends. Unused elements are zero
    }
    return x0;
  }

#ifdef VCL_NAMESPACE
}
#endif