The Gaudi Framework  v29r0 (ff2e7097)
local_valgrind.h
Go to the documentation of this file.
1 /* -*- c -*-
2  ----------------------------------------------------------------
3 
4  Notice that the following BSD-style license applies to this one
5  file (valgrind.h) only. The rest of Valgrind is licensed under the
6  terms of the GNU General Public License, version 2, unless
7  otherwise indicated. See the COPYING file in the source
8  distribution for details.
9 
10  ----------------------------------------------------------------
11 
12  This file is part of Valgrind, a dynamic binary instrumentation
13  framework.
14 
15  Copyright (C) 2000-2013 Julian Seward. All rights reserved.
16 
17  Redistribution and use in source and binary forms, with or without
18  modification, are permitted provided that the following conditions
19  are met:
20 
21  1. Redistributions of source code must retain the above copyright
22  notice, this list of conditions and the following disclaimer.
23 
24  2. The origin of this software must not be misrepresented; you must
25  not claim that you wrote the original software. If you use this
26  software in a product, an acknowledgment in the product
27  documentation would be appreciated but is not required.
28 
29  3. Altered source versions must be plainly marked as such, and must
30  not be misrepresented as being the original software.
31 
32  4. The name of the author may not be used to endorse or promote
33  products derived from this software without specific prior written
34  permission.
35 
36  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
37  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
38  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39  ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
40  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
42  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
43  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
44  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
45  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
46  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 
48  ----------------------------------------------------------------
49 
50  Notice that the above BSD-style license applies to this one file
51  (valgrind.h) only. The entire rest of Valgrind is licensed under
52  the terms of the GNU General Public License, version 2. See the
53  COPYING file in the source distribution for details.
54 
55  ----------------------------------------------------------------
56 */
57 
58 /* This file is for inclusion into client (your!) code.
59 
60  You can use these macros to manipulate and query Valgrind's
61  execution inside your own programs.
62 
63  The resulting executables will still run without Valgrind, just a
64  little bit more slowly than they otherwise would, but otherwise
65  unchanged. When not running on valgrind, each client request
66  consumes very few (eg. 7) instructions, so the resulting performance
67  loss is negligible unless you plan to execute client requests
68  millions of times per second. Nevertheless, if that is still a
69  problem, you can compile with the NVALGRIND symbol defined (gcc
70  -DNVALGRIND) so that client requests are not even compiled in. */
71 
72 #ifndef __VALGRIND_H
73 #define __VALGRIND_H
74 
75 /* ------------------------------------------------------------------ */
76 /* VERSION NUMBER OF VALGRIND */
77 /* ------------------------------------------------------------------ */
78 
79 /* Specify Valgrind's version number, so that user code can
80  conditionally compile based on our version number. Note that these
81  were introduced at version 3.6 and so do not exist in version 3.5
82  or earlier. The recommended way to use them to check for "version
83  X.Y or later" is (eg)
84 
85 #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
86  && (__VALGRIND_MAJOR__ > 3 \
87  || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
88 */
89 #define __VALGRIND_MAJOR__ 3
90 #define __VALGRIND_MINOR__ 8
91 
92 #include <stdarg.h>
93 
94 /* Nb: this file might be included in a file compiled with -ansi. So
95  we can't use C++ style "//" comments nor the "asm" keyword (instead
96  use "__asm__"). */
97 
98 /* Derive some tags indicating what the target platform is. Note
99  that in this file we're using the compiler's CPP symbols for
100  identifying architectures, which are different to the ones we use
101  within the rest of Valgrind. Note, __powerpc__ is active for both
102  32 and 64-bit PPC, whereas __powerpc64__ is only active for the
103  latter (on Linux, that is).
104 
105  Misc note: how to find out what's predefined in gcc by default:
106  gcc -Wp,-dM somefile.c
107 */
/* Reset every PLAT_* tag before detection, so a stale definition from
   an earlier include or the command line cannot leak through; the #if
   chain below then defines at most one of them. */
108 #undef PLAT_x86_darwin
109 #undef PLAT_amd64_darwin
110 #undef PLAT_x86_win32
111 #undef PLAT_amd64_win64
112 #undef PLAT_x86_linux
113 #undef PLAT_amd64_linux
114 #undef PLAT_ppc32_linux
115 #undef PLAT_ppc64_linux
116 #undef PLAT_arm_linux
117 #undef PLAT_s390x_linux
118 #undef PLAT_mips32_linux
119 #undef PLAT_mips64_linux
120 
121 #if defined( __APPLE__ ) && defined( __i386__ )
122 #define PLAT_x86_darwin 1
123 #elif defined( __APPLE__ ) && defined( __x86_64__ )
124 #define PLAT_amd64_darwin 1
125 #elif defined( __MINGW32__ ) || defined( __CYGWIN32__ ) || ( defined( _WIN32 ) && defined( _M_IX86 ) )
126 #define PLAT_x86_win32 1
127 #elif defined( __MINGW64__ ) || ( defined( _WIN64 ) && defined( _M_X64 ) )
128 #define PLAT_amd64_win64 1
129 #elif defined( __linux__ ) && defined( __i386__ )
130 #define PLAT_x86_linux 1
131 #elif defined( __linux__ ) && defined( __x86_64__ )
132 #define PLAT_amd64_linux 1
133 #elif defined( __linux__ ) && defined( __powerpc__ ) && !defined( __powerpc64__ )
134 #define PLAT_ppc32_linux 1
135 #elif defined( __linux__ ) && defined( __powerpc__ ) && defined( __powerpc64__ )
136 #define PLAT_ppc64_linux 1
137 #elif defined( __linux__ ) && defined( __arm__ )
138 #define PLAT_arm_linux 1
139 #elif defined( __linux__ ) && defined( __s390__ ) && defined( __s390x__ )
140 #define PLAT_s390x_linux 1
141 #elif defined( __linux__ ) && defined( __mips__ )
142 #if ( __mips == 64 )
143 #define PLAT_mips64_linux 1
144 #else
145 #define PLAT_mips32_linux 1
146 #endif
147 #else
148 /* If we're not compiling for our target platform, don't generate
149  any inline asms. */
150 #if !defined( NVALGRIND )
151 #define NVALGRIND 1
152 #endif
153 #endif
154 
155 /* ------------------------------------------------------------------ */
156 /* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
157 /* in here of use to end-users -- skip to the next section. */
158 /* ------------------------------------------------------------------ */
159 
160 /*
161  * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
162  * request. Accepts both pointers and integers as arguments.
163  *
164  * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
165  * client request that does not return a value.
166 
167  * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
168  * client request and whose value equals the client request result. Accepts
169  * both pointers and integers as arguments. Note that such calls are not
170  * necessarily pure functions -- they may have side effects.
171  */
172 
/* Statement-form wrappers around VALGRIND_DO_CLIENT_REQUEST_EXPR:
   the first assigns the request result to the lvalue _zzq_rlval, the
   second discards it (default value 0, cast to void).  Both use the
   do { } while (0) idiom so they behave as a single statement. */
173 #define VALGRIND_DO_CLIENT_REQUEST( _zzq_rlval, _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, \
174  _zzq_arg4, _zzq_arg5 ) \
175  do { \
176  ( _zzq_rlval ) = VALGRIND_DO_CLIENT_REQUEST_EXPR( ( _zzq_default ), ( _zzq_request ), ( _zzq_arg1 ), \
177  ( _zzq_arg2 ), ( _zzq_arg3 ), ( _zzq_arg4 ), ( _zzq_arg5 ) ); \
178  } while ( 0 )
179 
180 #define VALGRIND_DO_CLIENT_REQUEST_STMT( _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5 ) \
181  do { \
182  (void)VALGRIND_DO_CLIENT_REQUEST_EXPR( 0, ( _zzq_request ), ( _zzq_arg1 ), ( _zzq_arg2 ), ( _zzq_arg3 ), \
183  ( _zzq_arg4 ), ( _zzq_arg5 ) ); \
184  } while ( 0 )
185 
186 #if defined( NVALGRIND )
187 
188 /* Define NVALGRIND to completely remove the Valgrind magic sequence
189  from the compiled code (analogous to NDEBUG's effects on
190  assert()) */
/* NB: the expansion is just ( _zzq_default ); the request code and the
   five argument expressions are dropped by the macro and therefore
   never evaluated in an NVALGRIND build — avoid side effects in them. */
191 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
192  _zzq_arg5 ) \
193  ( _zzq_default )
194 
195 #else /* ! NVALGRIND */
196 
197 /* The following defines the magic code sequences which the JITter
198  spots and handles magically. Don't look too closely at them as
199  they will rot your brain.
200 
201  The assembly code sequences for all architectures is in this one
202  file. This is because this file must be stand-alone, and we don't
203  want to have multiple files.
204 
205  For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
206  value gets put in the return slot, so that everything works when
207  this is executed not under Valgrind. Args are passed in a memory
208  block, and so there's no intrinsic limit to the number that could
209  be passed, but it's currently five.
210 
211  The macro args are:
212  _zzq_rlval result lvalue
213  _zzq_default default value (result returned when running on real CPU)
214  _zzq_request request code
215  _zzq_arg1..5 request params
216 
217  The other two macros are used to support function wrapping, and are
218  a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
219  guest's NRADDR pseudo-register and whatever other information is
220  needed to safely run the call original from the wrapper: on
221  ppc64-linux, the R2 value at the divert point is also needed. This
222  information is abstracted into a user-visible type, OrigFn.
223 
224  VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
225  guest, but guarantees that the branch instruction will not be
226  redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
227  branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
228  complete inline asm, since it needs to be combined with more magic
229  inline asm stuff to be useful.
230 */
231 
232 /* ------------------------- x86-{linux,darwin} ---------------- */
233 
234 #if defined( PLAT_x86_linux ) || defined( PLAT_x86_darwin ) || ( defined( PLAT_x86_win32 ) && defined( __GNUC__ ) )
235 
236 typedef struct {
237  unsigned int nraddr; /* where's the code? */
238 } OrigFn;
239 
/* Magic preamble: rotates %edi by 3+13+29+19 = 64 bits in total, i.e. a
   net no-op on a 32-bit register when run natively.  Valgrind's JIT
   recognises this exact sequence and gives special meaning to the
   xchg instruction that follows it.  Do not alter the byte sequence. */
240 #define __SPECIAL_INSTRUCTION_PREAMBLE \
241  "roll $3, %%edi ; roll $13, %%edi\n\t" \
242  "roll $29, %%edi ; roll $19, %%edi\n\t"
243 
244 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
245  _zzq_arg5 ) \
246  __extension__( { \
247  volatile unsigned int _zzq_args[6]; \
248  volatile unsigned int _zzq_result; \
249  _zzq_args[0] = (unsigned int)( _zzq_request ); \
250  _zzq_args[1] = (unsigned int)( _zzq_arg1 ); \
251  _zzq_args[2] = (unsigned int)( _zzq_arg2 ); \
252  _zzq_args[3] = (unsigned int)( _zzq_arg3 ); \
253  _zzq_args[4] = (unsigned int)( _zzq_arg4 ); \
254  _zzq_args[5] = (unsigned int)( _zzq_arg5 ); \
255  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %EDX = client_request ( %EAX ) */ \
256  "xchgl %%ebx,%%ebx" \
257  : "=d"( _zzq_result ) \
258  : "a"( &_zzq_args[0] ), "0"( _zzq_default ) \
259  : "cc", "memory" ); \
260  _zzq_result; \
261  } )
262 
/* Reads the guest NRADDR pseudo-register into _zzq_rlval.nraddr; when
   not running under Valgrind, __addr keeps the default loaded via the
   "0" tie (here: whatever was in %eax) — callers use this only inside
   wrappers, per the function-wrapping section below. */
263 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
264  { \
265  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
266  volatile unsigned int __addr; \
267  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %EAX = guest_NRADDR */ \
268  "xchgl %%ecx,%%ecx" \
269  : "=a"( __addr ) \
270  : \
271  : "cc", "memory" ); \
272  _zzq_orig->nraddr = __addr; \
273  }
274 
/* Text fragment only (not a complete asm statement): to be embedded in
   a larger inline-asm that performs "call *%eax" without redirection. */
275 #define VALGRIND_CALL_NOREDIR_EAX \
276  __SPECIAL_INSTRUCTION_PREAMBLE \
277  /* call-noredir *%EAX */ \
278  "xchgl %%edx,%%edx\n\t"
279 
280 #define VALGRIND_VEX_INJECT_IR() \
281  do { \
282  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE "xchgl %%edi,%%edi\n\t" : : : "cc", "memory" ); \
283  } while ( 0 )
284 
285 #endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
286 
287 /* ------------------------- x86-Win32 ------------------------- */
288 
289 #if defined( PLAT_x86_win32 ) && !defined( __GNUC__ )
290 
291 typedef struct {
292  unsigned int nraddr; /* where's the code? */
293 } OrigFn;
294 
295 #if defined( _MSC_VER )
296 
/* Same 3+13+29+19-bit rotate-of-edi magic sequence as the GCC x86 port,
   spelled with MSVC __asm syntax. */
297 #define __SPECIAL_INSTRUCTION_PREAMBLE __asm rol edi, 3 __asm rol edi, 13 __asm rol edi, 29 __asm rol edi, 19
298 
299 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
300  _zzq_arg5 ) \
301  valgrind_do_client_request_expr( ( uintptr_t )( _zzq_default ), ( uintptr_t )( _zzq_request ), \
302  ( uintptr_t )( _zzq_arg1 ), ( uintptr_t )( _zzq_arg2 ), ( uintptr_t )( _zzq_arg3 ), \
303  ( uintptr_t )( _zzq_arg4 ), ( uintptr_t )( _zzq_arg5 ) )
304 
/* Out-of-line helper for the macro above.  NOTE(review): presumably the
   request goes through a real function here because MSVC has no
   GCC-style statement expressions for an expression-valued asm —
   confirm against upstream valgrind.h commentary. */
305 static __inline uintptr_t valgrind_do_client_request_expr( uintptr_t _zzq_default, uintptr_t _zzq_request,
306  uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
307  uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
308  uintptr_t _zzq_arg5 )
309 {
310  volatile uintptr_t _zzq_args[6];
311  volatile unsigned int _zzq_result;
312  _zzq_args[0] = ( uintptr_t )( _zzq_request );
313  _zzq_args[1] = ( uintptr_t )( _zzq_arg1 );
314  _zzq_args[2] = ( uintptr_t )( _zzq_arg2 );
315  _zzq_args[3] = ( uintptr_t )( _zzq_arg3 );
316  _zzq_args[4] = ( uintptr_t )( _zzq_arg4 );
317  _zzq_args[5] = ( uintptr_t )( _zzq_arg5 );
318  __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
319  __SPECIAL_INSTRUCTION_PREAMBLE
320  /* %EDX = client_request ( %EAX ) */
321  __asm xchg ebx,ebx
322  __asm mov _zzq_result, edx
323  }
324  return _zzq_result;
325 }
326 
327 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
328  { \
329  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
330  volatile unsigned int __addr; \
331  __asm { __SPECIAL_INSTRUCTION_PREAMBLE /* %EAX = guest_NRADDR */ \
332  __asm xchg ecx,ecx \
333  __asm mov __addr, eax \
334  } \
335  _zzq_orig->nraddr = __addr; \
336  }
337 
/* Deliberately expands to the undeclared identifier ERROR so that any
   attempted use fails to compile: the no-redirect call mechanism is not
   provided for the MSVC port. */
338 #define VALGRIND_CALL_NOREDIR_EAX ERROR
339 
340 #define VALGRIND_VEX_INJECT_IR() \
341  do { \
342  __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
343  __asm xchg edi,edi} \
344  } while ( 0 )
345 
346 #else
347 #error Unsupported compiler.
348 #endif
349 
350 #endif /* PLAT_x86_win32 */
351 
352 /* ------------------------ amd64-{linux,darwin} --------------- */
353 
354 #if defined( PLAT_amd64_linux ) || defined( PLAT_amd64_darwin )
355 
356 typedef struct {
357  unsigned long long int nraddr; /* where's the code? */
358 } OrigFn;
359 
/* Magic preamble: rotates %rdi by 3+13+61+51 = 128 bits in total, i.e.
   a net no-op on a 64-bit register when run natively; Valgrind's JIT
   recognises the sequence.  Do not alter the byte sequence. */
360 #define __SPECIAL_INSTRUCTION_PREAMBLE \
361  "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
362  "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
363 
364 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
365  _zzq_arg5 ) \
366  __extension__( { \
367  volatile unsigned long long int _zzq_args[6]; \
368  volatile unsigned long long int _zzq_result; \
369  _zzq_args[0] = (unsigned long long int)( _zzq_request ); \
370  _zzq_args[1] = (unsigned long long int)( _zzq_arg1 ); \
371  _zzq_args[2] = (unsigned long long int)( _zzq_arg2 ); \
372  _zzq_args[3] = (unsigned long long int)( _zzq_arg3 ); \
373  _zzq_args[4] = (unsigned long long int)( _zzq_arg4 ); \
374  _zzq_args[5] = (unsigned long long int)( _zzq_arg5 ); \
375  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %RDX = client_request ( %RAX ) */ \
376  "xchgq %%rbx,%%rbx" \
377  : "=d"( _zzq_result ) \
378  : "a"( &_zzq_args[0] ), "0"( _zzq_default ) \
379  : "cc", "memory" ); \
380  _zzq_result; \
381  } )
382 
383 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
384  { \
385  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
386  volatile unsigned long long int __addr; \
387  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %RAX = guest_NRADDR */ \
388  "xchgq %%rcx,%%rcx" \
389  : "=a"( __addr ) \
390  : \
391  : "cc", "memory" ); \
392  _zzq_orig->nraddr = __addr; \
393  }
394 
/* Text fragment only (not a complete asm statement): to be embedded in
   a larger inline-asm that performs "call *%rax" without redirection. */
395 #define VALGRIND_CALL_NOREDIR_RAX \
396  __SPECIAL_INSTRUCTION_PREAMBLE \
397  /* call-noredir *%RAX */ \
398  "xchgq %%rdx,%%rdx\n\t"
399 
400 #define VALGRIND_VEX_INJECT_IR() \
401  do { \
402  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE "xchgq %%rdi,%%rdi\n\t" : : : "cc", "memory" ); \
403  } while ( 0 )
404 
405 #endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
406 
407 /* ------------------------ ppc32-linux ------------------------ */
408 
409 #if defined( PLAT_ppc32_linux )
410 
411 typedef struct {
412  unsigned int nraddr; /* where's the code? */
413 } OrigFn;
414 
/* Magic preamble: a sequence of rlwinm (rotate-word) instructions on r0
   that Valgrind's JIT recognises.  Do not alter the byte sequence. */
415 #define __SPECIAL_INSTRUCTION_PREAMBLE \
416  "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
417  "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
418 
/* Unlike the x86/amd64 ports, the args pointer and default travel via
   explicit "mr" moves into r3/r4, which are listed as clobbers. */
419 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
420  _zzq_arg5 ) \
421  \
422  __extension__( { \
423  unsigned int _zzq_args[6]; \
424  unsigned int _zzq_result; \
425  unsigned int* _zzq_ptr; \
426  _zzq_args[0] = (unsigned int)( _zzq_request ); \
427  _zzq_args[1] = (unsigned int)( _zzq_arg1 ); \
428  _zzq_args[2] = (unsigned int)( _zzq_arg2 ); \
429  _zzq_args[3] = (unsigned int)( _zzq_arg3 ); \
430  _zzq_args[4] = (unsigned int)( _zzq_arg4 ); \
431  _zzq_args[5] = (unsigned int)( _zzq_arg5 ); \
432  _zzq_ptr = _zzq_args; \
433  __asm__ volatile( "mr 3,%1\n\t" /*default*/ \
434  "mr 4,%2\n\t" /*ptr*/ \
435  __SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = client_request ( %R4 ) */ \
436  "or 1,1,1\n\t" \
437  "mr %0,3" /*result*/ \
438  : "=b"( _zzq_result ) \
439  : "b"( _zzq_default ), "b"( _zzq_ptr ) \
440  : "cc", "memory", "r3", "r4" ); \
441  _zzq_result; \
442  } )
443 
444 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
445  { \
446  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
447  unsigned int __addr; \
448  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = guest_NRADDR */ \
449  "or 2,2,2\n\t" \
450  "mr %0,3" \
451  : "=b"( __addr ) \
452  : \
453  : "cc", "memory", "r3" ); \
454  _zzq_orig->nraddr = __addr; \
455  }
456 
/* Text fragment only: embedded in a larger inline-asm to perform a
   branch-and-link to the address in r11 without redirection. */
457 #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
458  __SPECIAL_INSTRUCTION_PREAMBLE \
459  /* branch-and-link-to-noredir *%R11 */ \
460  "or 3,3,3\n\t"
461 
462 #define VALGRIND_VEX_INJECT_IR() \
463  do { \
464  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE "or 5,5,5\n\t" ); \
465  } while ( 0 )
466 
467 #endif /* PLAT_ppc32_linux */
468 
469 /* ------------------------ ppc64-linux ------------------------ */
470 
471 #if defined( PLAT_ppc64_linux )
472 
/* On ppc64 the wrapper context carries the TOC pointer (r2) in addition
   to the entry address, as noted in the file-level commentary above. */
473 typedef struct {
474  unsigned long long int nraddr; /* where's the code? */
475  unsigned long long int r2; /* what tocptr do we need? */
476 } OrigFn;
477 
/* Magic preamble: rotldi of r0 by 3+13+61+51 = 128 bits, a net no-op on
   a 64-bit register; Valgrind's JIT recognises the sequence. */
478 #define __SPECIAL_INSTRUCTION_PREAMBLE \
479  "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
480  "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
481 
482 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
483  _zzq_arg5 ) \
484  \
485  __extension__( { \
486  unsigned long long int _zzq_args[6]; \
487  unsigned long long int _zzq_result; \
488  unsigned long long int* _zzq_ptr; \
489  _zzq_args[0] = (unsigned long long int)( _zzq_request ); \
490  _zzq_args[1] = (unsigned long long int)( _zzq_arg1 ); \
491  _zzq_args[2] = (unsigned long long int)( _zzq_arg2 ); \
492  _zzq_args[3] = (unsigned long long int)( _zzq_arg3 ); \
493  _zzq_args[4] = (unsigned long long int)( _zzq_arg4 ); \
494  _zzq_args[5] = (unsigned long long int)( _zzq_arg5 ); \
495  _zzq_ptr = _zzq_args; \
496  __asm__ volatile( "mr 3,%1\n\t" /*default*/ \
497  "mr 4,%2\n\t" /*ptr*/ \
498  __SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = client_request ( %R4 ) */ \
499  "or 1,1,1\n\t" \
500  "mr %0,3" /*result*/ \
501  : "=b"( _zzq_result ) \
502  : "b"( _zzq_default ), "b"( _zzq_ptr ) \
503  : "cc", "memory", "r3", "r4" ); \
504  _zzq_result; \
505  } )
506 
/* Two back-to-back requests: the first fetches guest_NRADDR into
   .nraddr, the second fetches guest_NRADDR_GPR2 (the divert-point r2)
   into .r2, reusing __addr as scratch between them. */
507 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
508  { \
509  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
510  unsigned long long int __addr; \
511  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = guest_NRADDR */ \
512  "or 2,2,2\n\t" \
513  "mr %0,3" \
514  : "=b"( __addr ) \
515  : \
516  : "cc", "memory", "r3" ); \
517  _zzq_orig->nraddr = __addr; \
518  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = guest_NRADDR_GPR2 */ \
519  "or 4,4,4\n\t" \
520  "mr %0,3" \
521  : "=b"( __addr ) \
522  : \
523  : "cc", "memory", "r3" ); \
524  _zzq_orig->r2 = __addr; \
525  }
526 
527 #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
528  __SPECIAL_INSTRUCTION_PREAMBLE \
529  /* branch-and-link-to-noredir *%R11 */ \
530  "or 3,3,3\n\t"
531 
532 #define VALGRIND_VEX_INJECT_IR() \
533  do { \
534  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE "or 5,5,5\n\t" ); \
535  } while ( 0 )
536 
537 #endif /* PLAT_ppc64_linux */
538 
539 /* ------------------------- arm-linux ------------------------- */
540 
541 #if defined( PLAT_arm_linux )
542 
543 typedef struct {
544  unsigned int nraddr; /* where's the code? */
545 } OrigFn;
546 
/* Magic preamble: rotates r12 by 3+13+29+19 = 64 bits in total, a net
   no-op on a 32-bit register; Valgrind's JIT recognises the sequence. */
547 #define __SPECIAL_INSTRUCTION_PREAMBLE \
548  "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
549  "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
550 
551 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
552  _zzq_arg5 ) \
553  \
554  __extension__( { \
555  volatile unsigned int _zzq_args[6]; \
556  volatile unsigned int _zzq_result; \
557  _zzq_args[0] = (unsigned int)( _zzq_request ); \
558  _zzq_args[1] = (unsigned int)( _zzq_arg1 ); \
559  _zzq_args[2] = (unsigned int)( _zzq_arg2 ); \
560  _zzq_args[3] = (unsigned int)( _zzq_arg3 ); \
561  _zzq_args[4] = (unsigned int)( _zzq_arg4 ); \
562  _zzq_args[5] = (unsigned int)( _zzq_arg5 ); \
563  __asm__ volatile( "mov r3, %1\n\t" /*default*/ \
564  "mov r4, %2\n\t" /*ptr*/ \
565  __SPECIAL_INSTRUCTION_PREAMBLE /* R3 = client_request ( R4 ) */ \
566  "orr r10, r10, r10\n\t" \
567  "mov %0, r3" /*result*/ \
568  : "=r"( _zzq_result ) \
569  : "r"( _zzq_default ), "r"( &_zzq_args[0] ) \
570  : "cc", "memory", "r3", "r4" ); \
571  _zzq_result; \
572  } )
573 
574 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
575  { \
576  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
577  unsigned int __addr; \
578  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* R3 = guest_NRADDR */ \
579  "orr r11, r11, r11\n\t" \
580  "mov %0, r3" \
581  : "=r"( __addr ) \
582  : \
583  : "cc", "memory", "r3" ); \
584  _zzq_orig->nraddr = __addr; \
585  }
586 
/* Text fragment only: embedded in a larger inline-asm to perform a
   branch-and-link to the address in r4 without redirection. */
587 #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
588  __SPECIAL_INSTRUCTION_PREAMBLE \
589  /* branch-and-link-to-noredir *%R4 */ \
590  "orr r12, r12, r12\n\t"
591 
592 #define VALGRIND_VEX_INJECT_IR() \
593  do { \
594  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE "orr r9, r9, r9\n\t" : : : "cc", "memory" ); \
595  } while ( 0 )
596 
597 #endif /* PLAT_arm_linux */
598 
599 /* ------------------------ s390x-linux ------------------------ */
600 
601 #if defined( PLAT_s390x_linux )
602 
603 typedef struct {
604  unsigned long long int nraddr; /* where's the code? */
605 } OrigFn;
606 
607 /* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
608  * code. This detection is implemented in platform specific toIR.c
609  * (e.g. VEX/priv/guest_s390_decoder.c).
610  */
611 #define __SPECIAL_INSTRUCTION_PREAMBLE \
612  "lr 15,15\n\t" \
613  "lr 1,1\n\t" \
614  "lr 2,2\n\t" \
615  "lr 3,3\n\t"
616 
/* The lr (register self-move, a native no-op) that follows the preamble
   selects which operation the JIT performs; one code per facility. */
617 #define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
618 #define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
619 #define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
620 #define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
621 
622 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
623  _zzq_arg5 ) \
624  __extension__( { \
625  volatile unsigned long long int _zzq_args[6]; \
626  volatile unsigned long long int _zzq_result; \
627  _zzq_args[0] = (unsigned long long int)( _zzq_request ); \
628  _zzq_args[1] = (unsigned long long int)( _zzq_arg1 ); \
629  _zzq_args[2] = (unsigned long long int)( _zzq_arg2 ); \
630  _zzq_args[3] = (unsigned long long int)( _zzq_arg3 ); \
631  _zzq_args[4] = (unsigned long long int)( _zzq_arg4 ); \
632  _zzq_args[5] = (unsigned long long int)( _zzq_arg5 ); \
633  __asm__ volatile( /* r2 = args */ \
634  "lgr 2,%1\n\t" /* r3 = default */ \
635  "lgr 3,%2\n\t" __SPECIAL_INSTRUCTION_PREAMBLE __CLIENT_REQUEST_CODE /* results = r3 */ \
636  "lgr %0, 3\n\t" \
637  : "=d"( _zzq_result ) \
638  : "a"( &_zzq_args[0] ), "0"( _zzq_default ) \
639  : "cc", "2", "3", "memory" ); \
640  _zzq_result; \
641  } )
642 
643 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
644  { \
645  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
646  volatile unsigned long long int __addr; \
647  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE __GET_NR_CONTEXT_CODE "lgr %0, 3\n\t" \
648  : "=a"( __addr ) \
649  : \
650  : "cc", "3", "memory" ); \
651  _zzq_orig->nraddr = __addr; \
652  }
653 
/* Text fragment only: embedded in a larger inline-asm to perform a
   no-redirect call via r1. */
654 #define VALGRIND_CALL_NOREDIR_R1 \
655  __SPECIAL_INSTRUCTION_PREAMBLE \
656  __CALL_NO_REDIR_CODE
657 
658 #define VALGRIND_VEX_INJECT_IR() \
659  do { \
660  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE __VEX_INJECT_IR_CODE ); \
661  } while ( 0 )
662 
663 #endif /* PLAT_s390x_linux */
664 
665 /* ------------------------- mips32-linux ---------------- */
666 
667 #if defined( PLAT_mips32_linux )
668 
669 typedef struct {
670  unsigned int nraddr; /* where's the code? */
671 } OrigFn;
672 
673 /* .word 0x342
674  * .word 0x742
675  * .word 0xC2
676  * .word 0x4C2*/
/* Magic preamble: srl shifts of $0 (the hard-wired zero register), a
   native no-op sequence that Valgrind's JIT recognises. */
677 #define __SPECIAL_INSTRUCTION_PREAMBLE \
678  "srl $0, $0, 13\n\t" \
679  "srl $0, $0, 29\n\t" \
680  "srl $0, $0, 3\n\t" \
681  "srl $0, $0, 19\n\t"
682 
/* NOTE(review): unlike the other GCC ports in this file, the MIPS asms
   below list only "$11"/"$12" as clobbers — no "memory" or "cc".
   Verify against upstream valgrind.h before relying on ordering. */
683 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
684  _zzq_arg5 ) \
685  __extension__( { \
686  volatile unsigned int _zzq_args[6]; \
687  volatile unsigned int _zzq_result; \
688  _zzq_args[0] = (unsigned int)( _zzq_request ); \
689  _zzq_args[1] = (unsigned int)( _zzq_arg1 ); \
690  _zzq_args[2] = (unsigned int)( _zzq_arg2 ); \
691  _zzq_args[3] = (unsigned int)( _zzq_arg3 ); \
692  _zzq_args[4] = (unsigned int)( _zzq_arg4 ); \
693  _zzq_args[5] = (unsigned int)( _zzq_arg5 ); \
694  __asm__ volatile( "move $11, %1\n\t" /*default*/ \
695  "move $12, %2\n\t" /*ptr*/ \
696  __SPECIAL_INSTRUCTION_PREAMBLE /* T3 = client_request ( T4 ) */ \
697  "or $13, $13, $13\n\t" \
698  "move %0, $11\n\t" /*result*/ \
699  : "=r"( _zzq_result ) \
700  : "r"( _zzq_default ), "r"( &_zzq_args[0] ) \
701  : "$11", "$12" ); \
702  _zzq_result; \
703  } )
704 
705 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
706  { \
707  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
708  volatile unsigned int __addr; \
709  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* %t9 = guest_NRADDR */ \
710  "or $14, $14, $14\n\t" \
711  "move %0, $11" /*result*/ \
712  : "=r"( __addr ) \
713  : \
714  : "$11" ); \
715  _zzq_orig->nraddr = __addr; \
716  }
717 
/* Text fragment only: embedded in a larger inline-asm to perform a
   no-redirect call through $t9. */
718 #define VALGRIND_CALL_NOREDIR_T9 \
719  __SPECIAL_INSTRUCTION_PREAMBLE \
720  /* call-noredir *%t9 */ \
721  "or $15, $15, $15\n\t"
722 
723 #define VALGRIND_VEX_INJECT_IR() \
724  do { \
725  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE "or $11, $11, $11\n\t" ); \
726  } while ( 0 )
727 
728 #endif /* PLAT_mips32_linux */
729 
730 /* ------------------------- mips64-linux ---------------- */
731 
732 #if defined( PLAT_mips64_linux )
733 
734 typedef struct {
735  unsigned long long nraddr; /* where's the code? */
736 } OrigFn;
737 
738 /* dsll $0,$0, 3
739  * dsll $0,$0, 13
740  * dsll $0,$0, 29
741  * dsll $0,$0, 19*/
/* Magic preamble: dsll shifts of $0 (the hard-wired zero register), a
   native no-op sequence that Valgrind's JIT recognises. */
742 #define __SPECIAL_INSTRUCTION_PREAMBLE \
743  "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
744  "dsll $0,$0,29 ; dsll $0,$0,19\n\t"
745 
/* NOTE(review): as in the mips32 section, only "$11"/"$12" are listed
   as clobbers — no "memory"/"cc"; verify against upstream valgrind.h. */
746 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
747  _zzq_arg5 ) \
748  __extension__( { \
749  volatile unsigned long long int _zzq_args[6]; \
750  volatile unsigned long long int _zzq_result; \
751  _zzq_args[0] = (unsigned long long int)( _zzq_request ); \
752  _zzq_args[1] = (unsigned long long int)( _zzq_arg1 ); \
753  _zzq_args[2] = (unsigned long long int)( _zzq_arg2 ); \
754  _zzq_args[3] = (unsigned long long int)( _zzq_arg3 ); \
755  _zzq_args[4] = (unsigned long long int)( _zzq_arg4 ); \
756  _zzq_args[5] = (unsigned long long int)( _zzq_arg5 ); \
757  __asm__ volatile( "move $11, %1\n\t" /*default*/ \
758  "move $12, %2\n\t" /*ptr*/ \
759  __SPECIAL_INSTRUCTION_PREAMBLE /* $11 = client_request ( $12 ) */ \
760  "or $13, $13, $13\n\t" \
761  "move %0, $11\n\t" /*result*/ \
762  : "=r"( _zzq_result ) \
763  : "r"( _zzq_default ), "r"( &_zzq_args[0] ) \
764  : "$11", "$12" ); \
765  _zzq_result; \
766  } )
767 
768 #define VALGRIND_GET_NR_CONTEXT( _zzq_rlval ) \
769  { \
770  volatile OrigFn* _zzq_orig = &( _zzq_rlval ); \
771  volatile unsigned long long int __addr; \
772  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE /* $11 = guest_NRADDR */ \
773  "or $14, $14, $14\n\t" \
774  "move %0, $11" /*result*/ \
775  : "=r"( __addr ) \
776  : \
777  : "$11" ); \
778  _zzq_orig->nraddr = __addr; \
779  }
780 
/* Text fragment only: embedded in a larger inline-asm to perform a
   no-redirect call through $25 ($t9). */
781 #define VALGRIND_CALL_NOREDIR_T9 \
782  __SPECIAL_INSTRUCTION_PREAMBLE \
783  /* call-noredir $25 */ \
784  "or $15, $15, $15\n\t"
785 
786 #define VALGRIND_VEX_INJECT_IR() \
787  do { \
788  __asm__ volatile( __SPECIAL_INSTRUCTION_PREAMBLE "or $11, $11, $11\n\t" ); \
789  } while ( 0 )
790 
791 #endif /* PLAT_mips64_linux */
792 
793 /* Insert assembly code for other platforms here... */
794 
795 #endif /* NVALGRIND */
796 
797 /* ------------------------------------------------------------------ */
798 /* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
799 /* ugly. It's the least-worst tradeoff I can think of. */
800 /* ------------------------------------------------------------------ */
801 
802 /* This section defines magic (a.k.a appalling-hack) macros for doing
803  guaranteed-no-redirection macros, so as to get from function
804  wrappers to the functions they are wrapping. The whole point is to
805  construct standard call sequences, but to do the call itself with a
806  special no-redirect call pseudo-instruction that the JIT
807  understands and handles specially. This section is long and
808  repetitious, and I can't see a way to make it shorter.
809 
810  The naming scheme is as follows:
811 
812  CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
813 
814  'W' stands for "word" and 'v' for "void". Hence there are
815  different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
816  and for each, the possibility of returning a word-typed result, or
817  no result.
818 */
819 
820 /* Use these to write the name of your wrapper. NOTE: duplicates
821  VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
822  the default behaviour equivalence class tag "00000" into the name.
823  See pub_tool_redir.h for details -- normally you don't need to
824  think about this, though. */
825 
826 /* Use an extra level of macroisation so as to ensure the soname/fnname
827  args are fully macro-expanded before pasting them together. */
/* Token-pasting helper; the extra level of macro (see comment above)
   guarantees soname/fnname are fully expanded before being pasted. */
828 #define VG_CONCAT4( _aa, _bb, _cc, _dd ) _aa##_bb##_cc##_dd
829 
/* Wrapper names carry the _vgw prefix; replacement names (below) carry
   _vgr.  ZU/ZZ select the name-encoding scheme, and "00000" is the
   default behaviour-equivalence-class tag (see pub_tool_redir.h). */
830 #define I_WRAP_SONAME_FNNAME_ZU( soname, fnname ) VG_CONCAT4( _vgw00000ZU_, soname, _, fnname )
831 
832 #define I_WRAP_SONAME_FNNAME_ZZ( soname, fnname ) VG_CONCAT4( _vgw00000ZZ_, soname, _, fnname )
833 
834 /* Use this macro from within a wrapper function to collect the
835  context (address and possibly other info) of the original function.
836  Once you have that you can then use it in one of the CALL_FN_
837  macros. The type of the argument _lval is OrigFn. */
838 #define VALGRIND_GET_ORIG_FN( _lval ) VALGRIND_GET_NR_CONTEXT( _lval )
839 
840 /* Also provide end-user facilities for function replacement, rather
841  than wrapping. A replacement function differs from a wrapper in
842  that it has no way to get hold of the original function being
843  called, and hence no way to call onwards to it. In a replacement
844  function, VALGRIND_GET_ORIG_FN always returns zero. */
845 
846 #define I_REPLACE_SONAME_FNNAME_ZU( soname, fnname ) VG_CONCAT4( _vgr00000ZU_, soname, _, fnname )
847 
848 #define I_REPLACE_SONAME_FNNAME_ZZ( soname, fnname ) VG_CONCAT4( _vgr00000ZZ_, soname, _, fnname )
849 
850 /* Derivatives of the main macros below, for calling functions
851  returning void. */
852 
853 #define CALL_FN_v_v( fnptr ) \
854  do { \
855  volatile unsigned long _junk; \
856  CALL_FN_W_v( _junk, fnptr ); \
857  } while ( 0 )
858 
859 #define CALL_FN_v_W( fnptr, arg1 ) \
860  do { \
861  volatile unsigned long _junk; \
862  CALL_FN_W_W( _junk, fnptr, arg1 ); \
863  } while ( 0 )
864 
865 #define CALL_FN_v_WW( fnptr, arg1, arg2 ) \
866  do { \
867  volatile unsigned long _junk; \
868  CALL_FN_W_WW( _junk, fnptr, arg1, arg2 ); \
869  } while ( 0 )
870 
871 #define CALL_FN_v_WWW( fnptr, arg1, arg2, arg3 ) \
872  do { \
873  volatile unsigned long _junk; \
874  CALL_FN_W_WWW( _junk, fnptr, arg1, arg2, arg3 ); \
875  } while ( 0 )
876 
877 #define CALL_FN_v_WWWW( fnptr, arg1, arg2, arg3, arg4 ) \
878  do { \
879  volatile unsigned long _junk; \
880  CALL_FN_W_WWWW( _junk, fnptr, arg1, arg2, arg3, arg4 ); \
881  } while ( 0 )
882 
883 #define CALL_FN_v_5W( fnptr, arg1, arg2, arg3, arg4, arg5 ) \
884  do { \
885  volatile unsigned long _junk; \
886  CALL_FN_W_5W( _junk, fnptr, arg1, arg2, arg3, arg4, arg5 ); \
887  } while ( 0 )
888 
889 #define CALL_FN_v_6W( fnptr, arg1, arg2, arg3, arg4, arg5, arg6 ) \
890  do { \
891  volatile unsigned long _junk; \
892  CALL_FN_W_6W( _junk, fnptr, arg1, arg2, arg3, arg4, arg5, arg6 ); \
893  } while ( 0 )
894 
895 #define CALL_FN_v_7W( fnptr, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
896  do { \
897  volatile unsigned long _junk; \
898  CALL_FN_W_7W( _junk, fnptr, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ); \
899  } while ( 0 )
900 
901 /* ------------------------- x86-{linux,darwin} ---------------- */
902 
903 #if defined( PLAT_x86_linux ) || defined( PLAT_x86_darwin )
904 
905 /* These regs are trashed by the hidden call. No need to mention eax
906  as gcc can already see that, plus causes gcc to bomb. */
907 #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
908 
909 /* Macros to save and align the stack before making a function
910  call and restore it afterwards as gcc may not keep the stack
911  pointer aligned if it doesn't realise calls are being made
912  to other functions. */
913 
914 #define VALGRIND_ALIGN_STACK \
915  "movl %%esp,%%edi\n\t" \
916  "andl $0xfffffff0,%%esp\n\t"
917 #define VALGRIND_RESTORE_STACK "movl %%edi,%%esp\n\t"
918 
919 /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
920  long) == 4. */
921 
922 #define CALL_FN_W_v( lval, orig ) \
923  do { \
924  volatile OrigFn _orig = ( orig ); \
925  volatile unsigned long _argvec[1]; \
926  volatile unsigned long _res; \
927  _argvec[0] = (unsigned long)_orig.nraddr; \
928  __asm__ volatile( VALGRIND_ALIGN_STACK "movl (%%eax), %%eax\n\t" /* target->%eax */ \
929  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
930  : /*out*/ "=a"( _res ) \
931  : /*in*/ "a"( &_argvec[0] ) \
932  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
933  lval = (__typeof__( lval ))_res; \
934  } while ( 0 )
935 
936 #define CALL_FN_W_W( lval, orig, arg1 ) \
937  do { \
938  volatile OrigFn _orig = ( orig ); \
939  volatile unsigned long _argvec[2]; \
940  volatile unsigned long _res; \
941  _argvec[0] = (unsigned long)_orig.nraddr; \
942  _argvec[1] = (unsigned long)( arg1 ); \
943  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $12, %%esp\n\t" \
944  "pushl 4(%%eax)\n\t" \
945  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
946  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
947  : /*out*/ "=a"( _res ) \
948  : /*in*/ "a"( &_argvec[0] ) \
949  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
950  lval = (__typeof__( lval ))_res; \
951  } while ( 0 )
952 
953 #define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
954  do { \
955  volatile OrigFn _orig = ( orig ); \
956  volatile unsigned long _argvec[3]; \
957  volatile unsigned long _res; \
958  _argvec[0] = (unsigned long)_orig.nraddr; \
959  _argvec[1] = (unsigned long)( arg1 ); \
960  _argvec[2] = (unsigned long)( arg2 ); \
961  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $8, %%esp\n\t" \
962  "pushl 8(%%eax)\n\t" \
963  "pushl 4(%%eax)\n\t" \
964  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
965  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
966  : /*out*/ "=a"( _res ) \
967  : /*in*/ "a"( &_argvec[0] ) \
968  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
969  lval = (__typeof__( lval ))_res; \
970  } while ( 0 )
971 
972 #define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
973  do { \
974  volatile OrigFn _orig = ( orig ); \
975  volatile unsigned long _argvec[4]; \
976  volatile unsigned long _res; \
977  _argvec[0] = (unsigned long)_orig.nraddr; \
978  _argvec[1] = (unsigned long)( arg1 ); \
979  _argvec[2] = (unsigned long)( arg2 ); \
980  _argvec[3] = (unsigned long)( arg3 ); \
981  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $4, %%esp\n\t" \
982  "pushl 12(%%eax)\n\t" \
983  "pushl 8(%%eax)\n\t" \
984  "pushl 4(%%eax)\n\t" \
985  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
986  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
987  : /*out*/ "=a"( _res ) \
988  : /*in*/ "a"( &_argvec[0] ) \
989  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
990  lval = (__typeof__( lval ))_res; \
991  } while ( 0 )
992 
993 #define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
994  do { \
995  volatile OrigFn _orig = ( orig ); \
996  volatile unsigned long _argvec[5]; \
997  volatile unsigned long _res; \
998  _argvec[0] = (unsigned long)_orig.nraddr; \
999  _argvec[1] = (unsigned long)( arg1 ); \
1000  _argvec[2] = (unsigned long)( arg2 ); \
1001  _argvec[3] = (unsigned long)( arg3 ); \
1002  _argvec[4] = (unsigned long)( arg4 ); \
1003  __asm__ volatile( VALGRIND_ALIGN_STACK "pushl 16(%%eax)\n\t" \
1004  "pushl 12(%%eax)\n\t" \
1005  "pushl 8(%%eax)\n\t" \
1006  "pushl 4(%%eax)\n\t" \
1007  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1008  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1009  : /*out*/ "=a"( _res ) \
1010  : /*in*/ "a"( &_argvec[0] ) \
1011  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1012  lval = (__typeof__( lval ))_res; \
1013  } while ( 0 )
1014 
1015 #define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
1016  do { \
1017  volatile OrigFn _orig = ( orig ); \
1018  volatile unsigned long _argvec[6]; \
1019  volatile unsigned long _res; \
1020  _argvec[0] = (unsigned long)_orig.nraddr; \
1021  _argvec[1] = (unsigned long)( arg1 ); \
1022  _argvec[2] = (unsigned long)( arg2 ); \
1023  _argvec[3] = (unsigned long)( arg3 ); \
1024  _argvec[4] = (unsigned long)( arg4 ); \
1025  _argvec[5] = (unsigned long)( arg5 ); \
1026  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $12, %%esp\n\t" \
1027  "pushl 20(%%eax)\n\t" \
1028  "pushl 16(%%eax)\n\t" \
1029  "pushl 12(%%eax)\n\t" \
1030  "pushl 8(%%eax)\n\t" \
1031  "pushl 4(%%eax)\n\t" \
1032  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1033  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1034  : /*out*/ "=a"( _res ) \
1035  : /*in*/ "a"( &_argvec[0] ) \
1036  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1037  lval = (__typeof__( lval ))_res; \
1038  } while ( 0 )
1039 
1040 #define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
1041  do { \
1042  volatile OrigFn _orig = ( orig ); \
1043  volatile unsigned long _argvec[7]; \
1044  volatile unsigned long _res; \
1045  _argvec[0] = (unsigned long)_orig.nraddr; \
1046  _argvec[1] = (unsigned long)( arg1 ); \
1047  _argvec[2] = (unsigned long)( arg2 ); \
1048  _argvec[3] = (unsigned long)( arg3 ); \
1049  _argvec[4] = (unsigned long)( arg4 ); \
1050  _argvec[5] = (unsigned long)( arg5 ); \
1051  _argvec[6] = (unsigned long)( arg6 ); \
1052  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $8, %%esp\n\t" \
1053  "pushl 24(%%eax)\n\t" \
1054  "pushl 20(%%eax)\n\t" \
1055  "pushl 16(%%eax)\n\t" \
1056  "pushl 12(%%eax)\n\t" \
1057  "pushl 8(%%eax)\n\t" \
1058  "pushl 4(%%eax)\n\t" \
1059  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1060  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1061  : /*out*/ "=a"( _res ) \
1062  : /*in*/ "a"( &_argvec[0] ) \
1063  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1064  lval = (__typeof__( lval ))_res; \
1065  } while ( 0 )
1066 
1067 #define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
1068  do { \
1069  volatile OrigFn _orig = ( orig ); \
1070  volatile unsigned long _argvec[8]; \
1071  volatile unsigned long _res; \
1072  _argvec[0] = (unsigned long)_orig.nraddr; \
1073  _argvec[1] = (unsigned long)( arg1 ); \
1074  _argvec[2] = (unsigned long)( arg2 ); \
1075  _argvec[3] = (unsigned long)( arg3 ); \
1076  _argvec[4] = (unsigned long)( arg4 ); \
1077  _argvec[5] = (unsigned long)( arg5 ); \
1078  _argvec[6] = (unsigned long)( arg6 ); \
1079  _argvec[7] = (unsigned long)( arg7 ); \
1080  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $4, %%esp\n\t" \
1081  "pushl 28(%%eax)\n\t" \
1082  "pushl 24(%%eax)\n\t" \
1083  "pushl 20(%%eax)\n\t" \
1084  "pushl 16(%%eax)\n\t" \
1085  "pushl 12(%%eax)\n\t" \
1086  "pushl 8(%%eax)\n\t" \
1087  "pushl 4(%%eax)\n\t" \
1088  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1089  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1090  : /*out*/ "=a"( _res ) \
1091  : /*in*/ "a"( &_argvec[0] ) \
1092  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1093  lval = (__typeof__( lval ))_res; \
1094  } while ( 0 )
1095 
1096 #define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
1097  do { \
1098  volatile OrigFn _orig = ( orig ); \
1099  volatile unsigned long _argvec[9]; \
1100  volatile unsigned long _res; \
1101  _argvec[0] = (unsigned long)_orig.nraddr; \
1102  _argvec[1] = (unsigned long)( arg1 ); \
1103  _argvec[2] = (unsigned long)( arg2 ); \
1104  _argvec[3] = (unsigned long)( arg3 ); \
1105  _argvec[4] = (unsigned long)( arg4 ); \
1106  _argvec[5] = (unsigned long)( arg5 ); \
1107  _argvec[6] = (unsigned long)( arg6 ); \
1108  _argvec[7] = (unsigned long)( arg7 ); \
1109  _argvec[8] = (unsigned long)( arg8 ); \
1110  __asm__ volatile( VALGRIND_ALIGN_STACK "pushl 32(%%eax)\n\t" \
1111  "pushl 28(%%eax)\n\t" \
1112  "pushl 24(%%eax)\n\t" \
1113  "pushl 20(%%eax)\n\t" \
1114  "pushl 16(%%eax)\n\t" \
1115  "pushl 12(%%eax)\n\t" \
1116  "pushl 8(%%eax)\n\t" \
1117  "pushl 4(%%eax)\n\t" \
1118  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1119  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1120  : /*out*/ "=a"( _res ) \
1121  : /*in*/ "a"( &_argvec[0] ) \
1122  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1123  lval = (__typeof__( lval ))_res; \
1124  } while ( 0 )
1125 
1126 #define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
1127  do { \
1128  volatile OrigFn _orig = ( orig ); \
1129  volatile unsigned long _argvec[10]; \
1130  volatile unsigned long _res; \
1131  _argvec[0] = (unsigned long)_orig.nraddr; \
1132  _argvec[1] = (unsigned long)( arg1 ); \
1133  _argvec[2] = (unsigned long)( arg2 ); \
1134  _argvec[3] = (unsigned long)( arg3 ); \
1135  _argvec[4] = (unsigned long)( arg4 ); \
1136  _argvec[5] = (unsigned long)( arg5 ); \
1137  _argvec[6] = (unsigned long)( arg6 ); \
1138  _argvec[7] = (unsigned long)( arg7 ); \
1139  _argvec[8] = (unsigned long)( arg8 ); \
1140  _argvec[9] = (unsigned long)( arg9 ); \
1141  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $12, %%esp\n\t" \
1142  "pushl 36(%%eax)\n\t" \
1143  "pushl 32(%%eax)\n\t" \
1144  "pushl 28(%%eax)\n\t" \
1145  "pushl 24(%%eax)\n\t" \
1146  "pushl 20(%%eax)\n\t" \
1147  "pushl 16(%%eax)\n\t" \
1148  "pushl 12(%%eax)\n\t" \
1149  "pushl 8(%%eax)\n\t" \
1150  "pushl 4(%%eax)\n\t" \
1151  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1152  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1153  : /*out*/ "=a"( _res ) \
1154  : /*in*/ "a"( &_argvec[0] ) \
1155  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1156  lval = (__typeof__( lval ))_res; \
1157  } while ( 0 )
1158 
1159 #define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
1160  do { \
1161  volatile OrigFn _orig = ( orig ); \
1162  volatile unsigned long _argvec[11]; \
1163  volatile unsigned long _res; \
1164  _argvec[0] = (unsigned long)_orig.nraddr; \
1165  _argvec[1] = (unsigned long)( arg1 ); \
1166  _argvec[2] = (unsigned long)( arg2 ); \
1167  _argvec[3] = (unsigned long)( arg3 ); \
1168  _argvec[4] = (unsigned long)( arg4 ); \
1169  _argvec[5] = (unsigned long)( arg5 ); \
1170  _argvec[6] = (unsigned long)( arg6 ); \
1171  _argvec[7] = (unsigned long)( arg7 ); \
1172  _argvec[8] = (unsigned long)( arg8 ); \
1173  _argvec[9] = (unsigned long)( arg9 ); \
1174  _argvec[10] = (unsigned long)( arg10 ); \
1175  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $8, %%esp\n\t" \
1176  "pushl 40(%%eax)\n\t" \
1177  "pushl 36(%%eax)\n\t" \
1178  "pushl 32(%%eax)\n\t" \
1179  "pushl 28(%%eax)\n\t" \
1180  "pushl 24(%%eax)\n\t" \
1181  "pushl 20(%%eax)\n\t" \
1182  "pushl 16(%%eax)\n\t" \
1183  "pushl 12(%%eax)\n\t" \
1184  "pushl 8(%%eax)\n\t" \
1185  "pushl 4(%%eax)\n\t" \
1186  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1187  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1188  : /*out*/ "=a"( _res ) \
1189  : /*in*/ "a"( &_argvec[0] ) \
1190  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1191  lval = (__typeof__( lval ))_res; \
1192  } while ( 0 )
1193 
1194 #define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
1195  do { \
1196  volatile OrigFn _orig = ( orig ); \
1197  volatile unsigned long _argvec[12]; \
1198  volatile unsigned long _res; \
1199  _argvec[0] = (unsigned long)_orig.nraddr; \
1200  _argvec[1] = (unsigned long)( arg1 ); \
1201  _argvec[2] = (unsigned long)( arg2 ); \
1202  _argvec[3] = (unsigned long)( arg3 ); \
1203  _argvec[4] = (unsigned long)( arg4 ); \
1204  _argvec[5] = (unsigned long)( arg5 ); \
1205  _argvec[6] = (unsigned long)( arg6 ); \
1206  _argvec[7] = (unsigned long)( arg7 ); \
1207  _argvec[8] = (unsigned long)( arg8 ); \
1208  _argvec[9] = (unsigned long)( arg9 ); \
1209  _argvec[10] = (unsigned long)( arg10 ); \
1210  _argvec[11] = (unsigned long)( arg11 ); \
1211  __asm__ volatile( VALGRIND_ALIGN_STACK "subl $4, %%esp\n\t" \
1212  "pushl 44(%%eax)\n\t" \
1213  "pushl 40(%%eax)\n\t" \
1214  "pushl 36(%%eax)\n\t" \
1215  "pushl 32(%%eax)\n\t" \
1216  "pushl 28(%%eax)\n\t" \
1217  "pushl 24(%%eax)\n\t" \
1218  "pushl 20(%%eax)\n\t" \
1219  "pushl 16(%%eax)\n\t" \
1220  "pushl 12(%%eax)\n\t" \
1221  "pushl 8(%%eax)\n\t" \
1222  "pushl 4(%%eax)\n\t" \
1223  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1224  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1225  : /*out*/ "=a"( _res ) \
1226  : /*in*/ "a"( &_argvec[0] ) \
1227  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1228  lval = (__typeof__( lval ))_res; \
1229  } while ( 0 )
1230 
1231 #define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
1232  do { \
1233  volatile OrigFn _orig = ( orig ); \
1234  volatile unsigned long _argvec[13]; \
1235  volatile unsigned long _res; \
1236  _argvec[0] = (unsigned long)_orig.nraddr; \
1237  _argvec[1] = (unsigned long)( arg1 ); \
1238  _argvec[2] = (unsigned long)( arg2 ); \
1239  _argvec[3] = (unsigned long)( arg3 ); \
1240  _argvec[4] = (unsigned long)( arg4 ); \
1241  _argvec[5] = (unsigned long)( arg5 ); \
1242  _argvec[6] = (unsigned long)( arg6 ); \
1243  _argvec[7] = (unsigned long)( arg7 ); \
1244  _argvec[8] = (unsigned long)( arg8 ); \
1245  _argvec[9] = (unsigned long)( arg9 ); \
1246  _argvec[10] = (unsigned long)( arg10 ); \
1247  _argvec[11] = (unsigned long)( arg11 ); \
1248  _argvec[12] = (unsigned long)( arg12 ); \
1249  __asm__ volatile( VALGRIND_ALIGN_STACK "pushl 48(%%eax)\n\t" \
1250  "pushl 44(%%eax)\n\t" \
1251  "pushl 40(%%eax)\n\t" \
1252  "pushl 36(%%eax)\n\t" \
1253  "pushl 32(%%eax)\n\t" \
1254  "pushl 28(%%eax)\n\t" \
1255  "pushl 24(%%eax)\n\t" \
1256  "pushl 20(%%eax)\n\t" \
1257  "pushl 16(%%eax)\n\t" \
1258  "pushl 12(%%eax)\n\t" \
1259  "pushl 8(%%eax)\n\t" \
1260  "pushl 4(%%eax)\n\t" \
1261  "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1262  VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1263  : /*out*/ "=a"( _res ) \
1264  : /*in*/ "a"( &_argvec[0] ) \
1265  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" ); \
1266  lval = (__typeof__( lval ))_res; \
1267  } while ( 0 )
1268 
1269 #endif /* PLAT_x86_linux || PLAT_x86_darwin */
1270 
1271 /* ------------------------ amd64-{linux,darwin} --------------- */
1272 
1273 #if defined( PLAT_amd64_linux ) || defined( PLAT_amd64_darwin )
1274 
1275 /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
1276 
1277 /* These regs are trashed by the hidden call. */
1278 #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11"
1279 
1280 /* This is all pretty complex. It's so as to make stack unwinding
1281  work reliably. See bug 243270. The basic problem is the sub and
1282  add of 128 of %rsp in all of the following macros. If gcc believes
1283  the CFA is in %rsp, then unwinding may fail, because what's at the
1284  CFA is not what gcc "expected" when it constructs the CFIs for the
1285  places where the macros are instantiated.
1286 
1287  But we can't just add a CFI annotation to increase the CFA offset
1288  by 128, to match the sub of 128 from %rsp, because we don't know
1289  whether gcc has chosen %rsp as the CFA at that point, or whether it
1290  has chosen some other register (eg, %rbp). In the latter case,
1291  adding a CFI annotation to change the CFA offset is simply wrong.
1292 
1293  So the solution is to get hold of the CFA using
1294  __builtin_dwarf_cfa(), put it in a known register, and add a
1295  CFI annotation to say what the register is. We choose %rbp for
1296  this (perhaps perversely), because:
1297 
1298  (1) %rbp is already subject to unwinding. If a new register was
1299  chosen then the unwinder would have to unwind it in all stack
1300  traces, which is expensive, and
1301 
1302  (2) %rbp is already subject to precise exception updates in the
1303  JIT. If a new register was chosen, we'd have to have precise
1304  exceptions for it too, which reduces performance of the
1305  generated code.
1306 
1307  However .. one extra complication. We can't just whack the result
1308  of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
1309  list of trashed registers at the end of the inline assembly
1310  fragments; gcc won't allow %rbp to appear in that list. Hence
1311  instead we need to stash %rbp in %r15 for the duration of the asm,
1312  and say that %r15 is trashed instead. gcc seems happy to go with
1313  that.
1314 
1315  Oh .. and this all needs to be conditionalised so that it is
1316  unchanged from before this commit, when compiled with older gccs
1317  that don't support __builtin_dwarf_cfa. Furthermore, since
1318  this header file is freestanding, it has to be independent of
1319  config.h, and so the following conditionalisation cannot depend on
1320  configure time checks.
1321 
1322  Although it's not clear from
1323  'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
1324  this expression excludes Darwin.
1325  .cfi directives in Darwin assembly appear to be completely
1326  different and I haven't investigated how they work.
1327 
1328  For even more entertainment value, note we have to use the
1329  completely undocumented __builtin_dwarf_cfa(), which appears to
1330  really compute the CFA, whereas __builtin_frame_address(0) claims
1331  to but actually doesn't. See
1332  https://bugs.kde.org/show_bug.cgi?id=243270#c47
1333 */
1334 #if defined( __GNUC__ ) && defined( __GCC_HAVE_DWARF2_CFI_ASM )
1335 #define __FRAME_POINTER , "r"( __builtin_dwarf_cfa() )
1336 #define VALGRIND_CFI_PROLOGUE \
1337  "movq %%rbp, %%r15\n\t" \
1338  "movq %2, %%rbp\n\t" \
1339  ".cfi_remember_state\n\t" \
1340  ".cfi_def_cfa rbp, 0\n\t"
1341 #define VALGRIND_CFI_EPILOGUE \
1342  "movq %%r15, %%rbp\n\t" \
1343  ".cfi_restore_state\n\t"
1344 #else
1345 #define __FRAME_POINTER
1346 #define VALGRIND_CFI_PROLOGUE
1347 #define VALGRIND_CFI_EPILOGUE
1348 #endif
1349 
1350 /* Macros to save and align the stack before making a function
1351  call and restore it afterwards as gcc may not keep the stack
1352  pointer aligned if it doesn't realise calls are being made
1353  to other functions. */
1354 
1355 #define VALGRIND_ALIGN_STACK \
1356  "movq %%rsp,%%r14\n\t" \
1357  "andq $0xfffffffffffffff0,%%rsp\n\t"
1358 #define VALGRIND_RESTORE_STACK "movq %%r14,%%rsp\n\t"
1359 
1360 /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
1361  long) == 8. */
1362 
1363 /* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
1364  macros. In order not to trash the stack redzone, we need to drop
1365  %rsp by 128 before the hidden call, and restore afterwards. The
1366  nastiness is that it is only by luck that the stack still appears
1367  to be unwindable during the hidden call - since then the behaviour
1368  of any routine using this macro does not match what the CFI data
1369  says. Sigh.
1370 
1371  Why is this important? Imagine that a wrapper has a stack
1372  allocated local, and passes to the hidden call, a pointer to it.
1373  Because gcc does not know about the hidden call, it may allocate
1374  that local in the redzone. Unfortunately the hidden call may then
1375  trash it before it comes to use it. So we must step clear of the
1376  redzone, for the duration of the hidden call, to make it safe.
1377 
1378  Probably the same problem afflicts the other redzone-style ABIs too
1379  (ppc64-linux); but for those, the stack is
1380  self describing (none of this CFI nonsense) so at least messing
1381  with the stack pointer doesn't give a danger of non-unwindable
1382  stack. */
1383 
1384 #define CALL_FN_W_v( lval, orig ) \
1385  do { \
1386  volatile OrigFn _orig = ( orig ); \
1387  volatile unsigned long _argvec[1]; \
1388  volatile unsigned long _res; \
1389  _argvec[0] = (unsigned long)_orig.nraddr; \
1390  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1391  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1392  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1393  : /*out*/ "=a"( _res ) \
1394  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1395  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1396  lval = (__typeof__( lval ))_res; \
1397  } while ( 0 )
1398 
1399 #define CALL_FN_W_W( lval, orig, arg1 ) \
1400  do { \
1401  volatile OrigFn _orig = ( orig ); \
1402  volatile unsigned long _argvec[2]; \
1403  volatile unsigned long _res; \
1404  _argvec[0] = (unsigned long)_orig.nraddr; \
1405  _argvec[1] = (unsigned long)( arg1 ); \
1406  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1407  "movq 8(%%rax), %%rdi\n\t" \
1408  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1409  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1410  : /*out*/ "=a"( _res ) \
1411  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1412  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1413  lval = (__typeof__( lval ))_res; \
1414  } while ( 0 )
1415 
1416 #define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
1417  do { \
1418  volatile OrigFn _orig = ( orig ); \
1419  volatile unsigned long _argvec[3]; \
1420  volatile unsigned long _res; \
1421  _argvec[0] = (unsigned long)_orig.nraddr; \
1422  _argvec[1] = (unsigned long)( arg1 ); \
1423  _argvec[2] = (unsigned long)( arg2 ); \
1424  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1425  "movq 16(%%rax), %%rsi\n\t" \
1426  "movq 8(%%rax), %%rdi\n\t" \
1427  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1428  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1429  : /*out*/ "=a"( _res ) \
1430  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1431  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1432  lval = (__typeof__( lval ))_res; \
1433  } while ( 0 )
1434 
1435 #define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
1436  do { \
1437  volatile OrigFn _orig = ( orig ); \
1438  volatile unsigned long _argvec[4]; \
1439  volatile unsigned long _res; \
1440  _argvec[0] = (unsigned long)_orig.nraddr; \
1441  _argvec[1] = (unsigned long)( arg1 ); \
1442  _argvec[2] = (unsigned long)( arg2 ); \
1443  _argvec[3] = (unsigned long)( arg3 ); \
1444  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1445  "movq 24(%%rax), %%rdx\n\t" \
1446  "movq 16(%%rax), %%rsi\n\t" \
1447  "movq 8(%%rax), %%rdi\n\t" \
1448  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1449  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1450  : /*out*/ "=a"( _res ) \
1451  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1452  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1453  lval = (__typeof__( lval ))_res; \
1454  } while ( 0 )
1455 
1456 #define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
1457  do { \
1458  volatile OrigFn _orig = ( orig ); \
1459  volatile unsigned long _argvec[5]; \
1460  volatile unsigned long _res; \
1461  _argvec[0] = (unsigned long)_orig.nraddr; \
1462  _argvec[1] = (unsigned long)( arg1 ); \
1463  _argvec[2] = (unsigned long)( arg2 ); \
1464  _argvec[3] = (unsigned long)( arg3 ); \
1465  _argvec[4] = (unsigned long)( arg4 ); \
1466  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1467  "movq 32(%%rax), %%rcx\n\t" \
1468  "movq 24(%%rax), %%rdx\n\t" \
1469  "movq 16(%%rax), %%rsi\n\t" \
1470  "movq 8(%%rax), %%rdi\n\t" \
1471  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1472  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1473  : /*out*/ "=a"( _res ) \
1474  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1475  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1476  lval = (__typeof__( lval ))_res; \
1477  } while ( 0 )
1478 
1479 #define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
1480  do { \
1481  volatile OrigFn _orig = ( orig ); \
1482  volatile unsigned long _argvec[6]; \
1483  volatile unsigned long _res; \
1484  _argvec[0] = (unsigned long)_orig.nraddr; \
1485  _argvec[1] = (unsigned long)( arg1 ); \
1486  _argvec[2] = (unsigned long)( arg2 ); \
1487  _argvec[3] = (unsigned long)( arg3 ); \
1488  _argvec[4] = (unsigned long)( arg4 ); \
1489  _argvec[5] = (unsigned long)( arg5 ); \
1490  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1491  "movq 40(%%rax), %%r8\n\t" \
1492  "movq 32(%%rax), %%rcx\n\t" \
1493  "movq 24(%%rax), %%rdx\n\t" \
1494  "movq 16(%%rax), %%rsi\n\t" \
1495  "movq 8(%%rax), %%rdi\n\t" \
1496  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1497  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1498  : /*out*/ "=a"( _res ) \
1499  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1500  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1501  lval = (__typeof__( lval ))_res; \
1502  } while ( 0 )
1503 
1504 #define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
1505  do { \
1506  volatile OrigFn _orig = ( orig ); \
1507  volatile unsigned long _argvec[7]; \
1508  volatile unsigned long _res; \
1509  _argvec[0] = (unsigned long)_orig.nraddr; \
1510  _argvec[1] = (unsigned long)( arg1 ); \
1511  _argvec[2] = (unsigned long)( arg2 ); \
1512  _argvec[3] = (unsigned long)( arg3 ); \
1513  _argvec[4] = (unsigned long)( arg4 ); \
1514  _argvec[5] = (unsigned long)( arg5 ); \
1515  _argvec[6] = (unsigned long)( arg6 ); \
1516  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1517  "movq 48(%%rax), %%r9\n\t" \
1518  "movq 40(%%rax), %%r8\n\t" \
1519  "movq 32(%%rax), %%rcx\n\t" \
1520  "movq 24(%%rax), %%rdx\n\t" \
1521  "movq 16(%%rax), %%rsi\n\t" \
1522  "movq 8(%%rax), %%rdi\n\t" \
1523  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1524  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1525  : /*out*/ "=a"( _res ) \
1526  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1527  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1528  lval = (__typeof__( lval ))_res; \
1529  } while ( 0 )
1530 
1531 #define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
1532  do { \
1533  volatile OrigFn _orig = ( orig ); \
1534  volatile unsigned long _argvec[8]; \
1535  volatile unsigned long _res; \
1536  _argvec[0] = (unsigned long)_orig.nraddr; \
1537  _argvec[1] = (unsigned long)( arg1 ); \
1538  _argvec[2] = (unsigned long)( arg2 ); \
1539  _argvec[3] = (unsigned long)( arg3 ); \
1540  _argvec[4] = (unsigned long)( arg4 ); \
1541  _argvec[5] = (unsigned long)( arg5 ); \
1542  _argvec[6] = (unsigned long)( arg6 ); \
1543  _argvec[7] = (unsigned long)( arg7 ); \
1544  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $136,%%rsp\n\t" \
1545  "pushq 56(%%rax)\n\t" \
1546  "movq 48(%%rax), %%r9\n\t" \
1547  "movq 40(%%rax), %%r8\n\t" \
1548  "movq 32(%%rax), %%rcx\n\t" \
1549  "movq 24(%%rax), %%rdx\n\t" \
1550  "movq 16(%%rax), %%rsi\n\t" \
1551  "movq 8(%%rax), %%rdi\n\t" \
1552  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1553  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1554  : /*out*/ "=a"( _res ) \
1555  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1556  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1557  lval = (__typeof__( lval ))_res; \
1558  } while ( 0 )
1559 
1560 #define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
1561  do { \
1562  volatile OrigFn _orig = ( orig ); \
1563  volatile unsigned long _argvec[9]; \
1564  volatile unsigned long _res; \
1565  _argvec[0] = (unsigned long)_orig.nraddr; \
1566  _argvec[1] = (unsigned long)( arg1 ); \
1567  _argvec[2] = (unsigned long)( arg2 ); \
1568  _argvec[3] = (unsigned long)( arg3 ); \
1569  _argvec[4] = (unsigned long)( arg4 ); \
1570  _argvec[5] = (unsigned long)( arg5 ); \
1571  _argvec[6] = (unsigned long)( arg6 ); \
1572  _argvec[7] = (unsigned long)( arg7 ); \
1573  _argvec[8] = (unsigned long)( arg8 ); \
1574  __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1575  "pushq 64(%%rax)\n\t" \
1576  "pushq 56(%%rax)\n\t" \
1577  "movq 48(%%rax), %%r9\n\t" \
1578  "movq 40(%%rax), %%r8\n\t" \
1579  "movq 32(%%rax), %%rcx\n\t" \
1580  "movq 24(%%rax), %%rdx\n\t" \
1581  "movq 16(%%rax), %%rsi\n\t" \
1582  "movq 8(%%rax), %%rdi\n\t" \
1583  "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1584  VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1585  : /*out*/ "=a"( _res ) \
1586  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1587  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
1588  lval = (__typeof__( lval ))_res; \
1589  } while ( 0 )
1590 
/* amd64: 9-arg variant.  Args 1-6 in registers, args 7-9 pushed on the stack.
   NOTE(review): subq alternates $136/$128 so that (subtraction + pushes) stays
   a multiple of 16 -- here 136 + 3*8 = 160 -- confirm against upstream. */
#define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[10]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    _argvec[7] = (unsigned long)( arg7 ); \
    _argvec[8] = (unsigned long)( arg8 ); \
    _argvec[9] = (unsigned long)( arg9 ); \
    __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $136,%%rsp\n\t" \
                      "pushq 72(%%rax)\n\t" /* arg9 -> stack */ \
                      "pushq 64(%%rax)\n\t" \
                      "pushq 56(%%rax)\n\t" \
                      "movq 48(%%rax), %%r9\n\t" \
                      "movq 40(%%rax), %%r8\n\t" \
                      "movq 32(%%rax), %%rcx\n\t" \
                      "movq 24(%%rax), %%rdx\n\t" \
                      "movq 16(%%rax), %%rsi\n\t" \
                      "movq 8(%%rax), %%rdi\n\t" \
                      "movq (%%rax), %%rax\n\t" /* target->%rax */ \
                      VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
                      : /*out*/ "=a"( _res ) \
                      : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1623 
/* amd64: 10-arg variant.  Args 1-6 in registers, args 7-10 pushed on the
   stack (four pushes = 32 bytes, so subq $128 keeps 16-byte alignment). */
#define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[11]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    _argvec[7] = (unsigned long)( arg7 ); \
    _argvec[8] = (unsigned long)( arg8 ); \
    _argvec[9] = (unsigned long)( arg9 ); \
    _argvec[10] = (unsigned long)( arg10 ); \
    __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
                      "pushq 80(%%rax)\n\t" /* arg10 -> stack */ \
                      "pushq 72(%%rax)\n\t" \
                      "pushq 64(%%rax)\n\t" \
                      "pushq 56(%%rax)\n\t" \
                      "movq 48(%%rax), %%r9\n\t" \
                      "movq 40(%%rax), %%r8\n\t" \
                      "movq 32(%%rax), %%rcx\n\t" \
                      "movq 24(%%rax), %%rdx\n\t" \
                      "movq 16(%%rax), %%rsi\n\t" \
                      "movq 8(%%rax), %%rdi\n\t" \
                      "movq (%%rax), %%rax\n\t" /* target->%rax */ \
                      VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
                      : /*out*/ "=a"( _res ) \
                      : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1658 
/* amd64: 11-arg variant.  Args 1-6 in registers, args 7-11 pushed on the
   stack (five pushes = 40 bytes, so subq $136 keeps 16-byte alignment). */
#define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[12]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    _argvec[7] = (unsigned long)( arg7 ); \
    _argvec[8] = (unsigned long)( arg8 ); \
    _argvec[9] = (unsigned long)( arg9 ); \
    _argvec[10] = (unsigned long)( arg10 ); \
    _argvec[11] = (unsigned long)( arg11 ); \
    __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $136,%%rsp\n\t" \
                      "pushq 88(%%rax)\n\t" /* arg11 -> stack */ \
                      "pushq 80(%%rax)\n\t" \
                      "pushq 72(%%rax)\n\t" \
                      "pushq 64(%%rax)\n\t" \
                      "pushq 56(%%rax)\n\t" \
                      "movq 48(%%rax), %%r9\n\t" \
                      "movq 40(%%rax), %%r8\n\t" \
                      "movq 32(%%rax), %%rcx\n\t" \
                      "movq 24(%%rax), %%rdx\n\t" \
                      "movq 16(%%rax), %%rsi\n\t" \
                      "movq 8(%%rax), %%rdi\n\t" \
                      "movq (%%rax), %%rax\n\t" /* target->%rax */ \
                      VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
                      : /*out*/ "=a"( _res ) \
                      : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1695 
/* amd64: 12-arg variant.  Args 1-6 in registers, args 7-12 pushed on the
   stack (six pushes = 48 bytes, so subq $128 keeps 16-byte alignment). */
#define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[13]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    _argvec[7] = (unsigned long)( arg7 ); \
    _argvec[8] = (unsigned long)( arg8 ); \
    _argvec[9] = (unsigned long)( arg9 ); \
    _argvec[10] = (unsigned long)( arg10 ); \
    _argvec[11] = (unsigned long)( arg11 ); \
    _argvec[12] = (unsigned long)( arg12 ); \
    __asm__ volatile( VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
                      "pushq 96(%%rax)\n\t" /* arg12 -> stack */ \
                      "pushq 88(%%rax)\n\t" \
                      "pushq 80(%%rax)\n\t" \
                      "pushq 72(%%rax)\n\t" \
                      "pushq 64(%%rax)\n\t" \
                      "pushq 56(%%rax)\n\t" \
                      "movq 48(%%rax), %%r9\n\t" \
                      "movq 40(%%rax), %%r8\n\t" \
                      "movq 32(%%rax), %%rcx\n\t" \
                      "movq 24(%%rax), %%rdx\n\t" \
                      "movq 16(%%rax), %%rsi\n\t" \
                      "movq 8(%%rax), %%rdi\n\t" \
                      "movq (%%rax), %%rax\n\t" /* target->%rax */ \
                      VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
                      : /*out*/ "=a"( _res ) \
                      : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1734 
1735 #endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
1736 
1737 /* ------------------------ ppc32-linux ------------------------ */
1738 
1739 #if defined( PLAT_ppc32_linux )
1740 
1741 /* This is useful for finding out about the on-stack stuff:
1742 
1743  extern int f9 ( int,int,int,int,int,int,int,int,int );
1744  extern int f10 ( int,int,int,int,int,int,int,int,int,int );
1745  extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
1746  extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
1747 
1748  int g9 ( void ) {
1749  return f9(11,22,33,44,55,66,77,88,99);
1750  }
1751  int g10 ( void ) {
1752  return f10(11,22,33,44,55,66,77,88,99,110);
1753  }
1754  int g11 ( void ) {
1755  return f11(11,22,33,44,55,66,77,88,99,110,121);
1756  }
1757  int g12 ( void ) {
1758  return f12(11,22,33,44,55,66,77,88,99,110,121,132);
1759  }
1760 */
1761 
1762 /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
1763 
1764 /* These regs are trashed by the hidden call. */
/* Clobber list for the asm statements below: registers the hidden call may
   trash (link/count/fixed-point-exception regs, all CR fields, r0 and
   r2-r13).  Expanded inside the asm "trash" operand lists. */
#define __CALLER_SAVED_REGS \
  "lr", "ctr", "xer", "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", "r0", "r2", "r3", "r4", "r5", "r6", \
  "r7", "r8", "r9", "r10", "r11", "r12", "r13"
1768 
1769 /* Macros to save and align the stack before making a function
1770  call and restore it afterwards as gcc may not keep the stack
1771  pointer aligned if it doesn't realise calls are being made
1772  to other functions. */
1773 
/* Save the stack pointer (r1) in r28, then clear its low 4 bits
   (rlwinm keeps bits 0..27), i.e. align r1 down to a 16-byte boundary.
   RESTORE copies the saved value back; r28 is listed in the asm clobbers. */
#define VALGRIND_ALIGN_STACK \
  "mr 28,1\n\t" \
  "rlwinm 1,1,0,0,27\n\t"
#define VALGRIND_RESTORE_STACK "mr 1,28\n\t"
1778 
1779 /* These CALL_FN_ macros assume that on ppc32-linux,
1780  sizeof(unsigned long) == 4. */
1781 
/* ppc32: call the non-redirected function with no arguments; the result is
   taken from r3 per the comment on the final "mr %0,3". */
#define CALL_FN_W_v( lval, orig ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[1]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1796 
/* ppc32: 1-arg call.  _argvec[0] holds the target; arg1 is loaded from
   4(r11) into r3. */
#define CALL_FN_W_W( lval, orig, arg1 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[2]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1813 
/* ppc32: 2-arg call; args loaded from _argvec into r3,r4. */
#define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" /* arg2->r4 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1832 
/* ppc32: 3-arg call; args loaded from _argvec into r3..r5. */
#define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[4]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1853 
/* ppc32: 4-arg call; args loaded from _argvec into r3..r6. */
#define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[5]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1876 
/* ppc32: 5-arg call; args loaded from _argvec into r3..r7. */
#define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[6]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1901 
/* ppc32: 6-arg call; args loaded from _argvec into r3..r8. */
#define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[7]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    _argvec[6] = (unsigned long)arg6; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 8,24(11)\n\t" \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1928 
/* ppc32: 7-arg call; args loaded from _argvec into r3..r9. */
#define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[8]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    _argvec[6] = (unsigned long)arg6; \
    _argvec[7] = (unsigned long)arg7; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 8,24(11)\n\t" \
                      "lwz 9,28(11)\n\t" \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1957 
/* ppc32: 8-arg call; args fill all the argument registers r3..r10
   (see the ARGREGS comment above: the rest would go on the stack). */
#define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[9]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    _argvec[6] = (unsigned long)arg6; \
    _argvec[7] = (unsigned long)arg7; \
    _argvec[8] = (unsigned long)arg8; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 8,24(11)\n\t" \
                      "lwz 9,28(11)\n\t" \
                      "lwz 10,32(11)\n\t" /* arg8->r10 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
1988 
/* ppc32: 9-arg call.  Args 1-8 go in r3..r10; arg9 is stored to the new
   stack frame at 8(r1) after "addi 1,1,-16" opens 16 bytes. */
#define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[10]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    _argvec[6] = (unsigned long)arg6; \
    _argvec[7] = (unsigned long)arg7; \
    _argvec[8] = (unsigned long)arg8; \
    _argvec[9] = (unsigned long)arg9; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "addi 1,1,-16\n\t" /* arg9 */ \
                      "lwz 3,36(11)\n\t" \
                      "stw 3,8(1)\n\t" /* args1-8 */ \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 8,24(11)\n\t" \
                      "lwz 9,28(11)\n\t" \
                      "lwz 10,32(11)\n\t" /* arg8->r10 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2023 
/* ppc32: 10-arg call.  Args 1-8 in r3..r10; arg9/arg10 stored to the new
   16-byte stack frame at 8(1)/12(1), highest arg stored first. */
#define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[11]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    _argvec[6] = (unsigned long)arg6; \
    _argvec[7] = (unsigned long)arg7; \
    _argvec[8] = (unsigned long)arg8; \
    _argvec[9] = (unsigned long)arg9; \
    _argvec[10] = (unsigned long)arg10; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "addi 1,1,-16\n\t" /* arg10 */ \
                      "lwz 3,40(11)\n\t" \
                      "stw 3,12(1)\n\t" /* arg9 */ \
                      "lwz 3,36(11)\n\t" \
                      "stw 3,8(1)\n\t" /* args1-8 */ \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 8,24(11)\n\t" \
                      "lwz 9,28(11)\n\t" \
                      "lwz 10,32(11)\n\t" /* arg8->r10 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2061 
/* ppc32: 11-arg call.  Args 1-8 in r3..r10; args 9-11 stored to a new
   32-byte stack frame at 8(1)..16(1), highest arg stored first. */
#define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[12]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    _argvec[6] = (unsigned long)arg6; \
    _argvec[7] = (unsigned long)arg7; \
    _argvec[8] = (unsigned long)arg8; \
    _argvec[9] = (unsigned long)arg9; \
    _argvec[10] = (unsigned long)arg10; \
    _argvec[11] = (unsigned long)arg11; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "addi 1,1,-32\n\t" /* arg11 */ \
                      "lwz 3,44(11)\n\t" \
                      "stw 3,16(1)\n\t" /* arg10 */ \
                      "lwz 3,40(11)\n\t" \
                      "stw 3,12(1)\n\t" /* arg9 */ \
                      "lwz 3,36(11)\n\t" \
                      "stw 3,8(1)\n\t" /* args1-8 */ \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 8,24(11)\n\t" \
                      "lwz 9,28(11)\n\t" \
                      "lwz 10,32(11)\n\t" /* arg8->r10 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2102 
/* ppc32: 12-arg call.  Args 1-8 in r3..r10; args 9-12 stored to a new
   32-byte stack frame at 8(1)..20(1), highest arg stored first. */
#define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[13]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)arg1; \
    _argvec[2] = (unsigned long)arg2; \
    _argvec[3] = (unsigned long)arg3; \
    _argvec[4] = (unsigned long)arg4; \
    _argvec[5] = (unsigned long)arg5; \
    _argvec[6] = (unsigned long)arg6; \
    _argvec[7] = (unsigned long)arg7; \
    _argvec[8] = (unsigned long)arg8; \
    _argvec[9] = (unsigned long)arg9; \
    _argvec[10] = (unsigned long)arg10; \
    _argvec[11] = (unsigned long)arg11; \
    _argvec[12] = (unsigned long)arg12; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "addi 1,1,-32\n\t" /* arg12 */ \
                      "lwz 3,48(11)\n\t" \
                      "stw 3,20(1)\n\t" /* arg11 */ \
                      "lwz 3,44(11)\n\t" \
                      "stw 3,16(1)\n\t" /* arg10 */ \
                      "lwz 3,40(11)\n\t" \
                      "stw 3,12(1)\n\t" /* arg9 */ \
                      "lwz 3,36(11)\n\t" \
                      "stw 3,8(1)\n\t" /* args1-8 */ \
                      "lwz 3,4(11)\n\t" /* arg1->r3 */ \
                      "lwz 4,8(11)\n\t" \
                      "lwz 5,12(11)\n\t" \
                      "lwz 6,16(11)\n\t" /* arg4->r6 */ \
                      "lwz 7,20(11)\n\t" \
                      "lwz 8,24(11)\n\t" \
                      "lwz 9,28(11)\n\t" \
                      "lwz 10,32(11)\n\t" /* arg8->r10 */ \
                      "lwz 11,0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2146 
2147 #endif /* PLAT_ppc32_linux */
2148 
2149 /* ------------------------ ppc64-linux ------------------------ */
2150 
2151 #if defined( PLAT_ppc64_linux )
2152 
2153 /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
2154 
2155 /* These regs are trashed by the hidden call. */
/* ppc64 clobber list for the asm statements below: registers the hidden call
   may trash (same set as the ppc32 definition above). */
#define __CALLER_SAVED_REGS \
  "lr", "ctr", "xer", "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", "r0", "r2", "r3", "r4", "r5", "r6", \
  "r7", "r8", "r9", "r10", "r11", "r12", "r13"
2159 
2160 /* Macros to save and align the stack before making a function
2161  call and restore it afterwards as gcc may not keep the stack
2162  pointer aligned if it doesn't realise calls are being made
2163  to other functions. */
2164 
/* Save the stack pointer (r1) in r28, then clear its low 4 bits
   (rldicr keeps bits 0..59 of the 64-bit value), i.e. align r1 down to a
   16-byte boundary.  RESTORE copies the saved value back; r28 is clobbered. */
#define VALGRIND_ALIGN_STACK \
  "mr 28,1\n\t" \
  "rldicr 1,1,0,59\n\t"
#define VALGRIND_RESTORE_STACK "mr 1,28\n\t"
2169 
2170 /* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
2171  long) == 8. */
2172 
/* ppc64: 0-arg call.  _argvec[1] holds the caller's TOC pointer (r2) and
   _argvec[2] the target; the asm saves r2 at -16(r11), installs the callee's
   TOC from -8(r11), and restores r2 after the call.  Note the asm input is
   &_argvec[2], so the saved r2 lives at negative offsets from r11. */
#define CALL_FN_W_v( lval, orig ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 0]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2194 
/* ppc64: 1-arg call with TOC save/restore; arg1 loaded from 8(r11) into r3. */
#define CALL_FN_W_W( lval, orig, arg1 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 1]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    _argvec[2 + 1] = (unsigned long)arg1; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 3, 8(11)\n\t" /* arg1->r3 */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2218 
/* ppc64: 2-arg call with TOC save/restore; args loaded into r3,r4. */
#define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 2]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    _argvec[2 + 1] = (unsigned long)arg1; \
    _argvec[2 + 2] = (unsigned long)arg2; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 3, 8(11)\n\t" /* arg1->r3 */ \
                      "ld 4, 16(11)\n\t" /* arg2->r4 */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2244 
/* ppc64: 3-arg call with TOC save/restore; args loaded into r3..r5. */
#define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 3]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    _argvec[2 + 1] = (unsigned long)arg1; \
    _argvec[2 + 2] = (unsigned long)arg2; \
    _argvec[2 + 3] = (unsigned long)arg3; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 3, 8(11)\n\t" /* arg1->r3 */ \
                      "ld 4, 16(11)\n\t" /* arg2->r4 */ \
                      "ld 5, 24(11)\n\t" /* arg3->r5 */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2272 
/* ppc64: 4-arg call with TOC save/restore; args loaded into r3..r6. */
#define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 4]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    _argvec[2 + 1] = (unsigned long)arg1; \
    _argvec[2 + 2] = (unsigned long)arg2; \
    _argvec[2 + 3] = (unsigned long)arg3; \
    _argvec[2 + 4] = (unsigned long)arg4; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 3, 8(11)\n\t" /* arg1->r3 */ \
                      "ld 4, 16(11)\n\t" /* arg2->r4 */ \
                      "ld 5, 24(11)\n\t" /* arg3->r5 */ \
                      "ld 6, 32(11)\n\t" /* arg4->r6 */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2302 
/* ppc64: 5-arg call with TOC save/restore; args loaded into r3..r7. */
#define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 5]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    _argvec[2 + 1] = (unsigned long)arg1; \
    _argvec[2 + 2] = (unsigned long)arg2; \
    _argvec[2 + 3] = (unsigned long)arg3; \
    _argvec[2 + 4] = (unsigned long)arg4; \
    _argvec[2 + 5] = (unsigned long)arg5; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 3, 8(11)\n\t" /* arg1->r3 */ \
                      "ld 4, 16(11)\n\t" /* arg2->r4 */ \
                      "ld 5, 24(11)\n\t" /* arg3->r5 */ \
                      "ld 6, 32(11)\n\t" /* arg4->r6 */ \
                      "ld 7, 40(11)\n\t" /* arg5->r7 */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2334 
/* ppc64: 6-arg call with TOC save/restore; args loaded into r3..r8. */
#define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 6]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    _argvec[2 + 1] = (unsigned long)arg1; \
    _argvec[2 + 2] = (unsigned long)arg2; \
    _argvec[2 + 3] = (unsigned long)arg3; \
    _argvec[2 + 4] = (unsigned long)arg4; \
    _argvec[2 + 5] = (unsigned long)arg5; \
    _argvec[2 + 6] = (unsigned long)arg6; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 3, 8(11)\n\t" /* arg1->r3 */ \
                      "ld 4, 16(11)\n\t" /* arg2->r4 */ \
                      "ld 5, 24(11)\n\t" /* arg3->r5 */ \
                      "ld 6, 32(11)\n\t" /* arg4->r6 */ \
                      "ld 7, 40(11)\n\t" /* arg5->r7 */ \
                      "ld 8, 48(11)\n\t" /* arg6->r8 */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2368 
/* ppc64: 7-arg call with TOC save/restore; args loaded into r3..r9. */
#define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
  do { \
    volatile OrigFn _orig = ( orig ); \
    volatile unsigned long _argvec[3 + 7]; \
    volatile unsigned long _res; \
    /* _argvec[0] holds current r2 across the call */ \
    _argvec[1] = (unsigned long)_orig.r2; \
    _argvec[2] = (unsigned long)_orig.nraddr; \
    _argvec[2 + 1] = (unsigned long)arg1; \
    _argvec[2 + 2] = (unsigned long)arg2; \
    _argvec[2 + 3] = (unsigned long)arg3; \
    _argvec[2 + 4] = (unsigned long)arg4; \
    _argvec[2 + 5] = (unsigned long)arg5; \
    _argvec[2 + 6] = (unsigned long)arg6; \
    _argvec[2 + 7] = (unsigned long)arg7; \
    __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
                      "std 2,-16(11)\n\t" /* save tocptr */ \
                      "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
                      "ld 3, 8(11)\n\t" /* arg1->r3 */ \
                      "ld 4, 16(11)\n\t" /* arg2->r4 */ \
                      "ld 5, 24(11)\n\t" /* arg3->r5 */ \
                      "ld 6, 32(11)\n\t" /* arg4->r6 */ \
                      "ld 7, 40(11)\n\t" /* arg5->r7 */ \
                      "ld 8, 48(11)\n\t" /* arg6->r8 */ \
                      "ld 9, 56(11)\n\t" /* arg7->r9 */ \
                      "ld 11, 0(11)\n\t" /* target->r11 */ \
                      VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
                      "mr %0,3\n\t" \
                      "ld 2,-16(11)\n\t" /* restore tocptr */ \
                      VALGRIND_RESTORE_STACK \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[2] ) \
                      : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
2404 
2405 #define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
2406  do { \
2407  volatile OrigFn _orig = ( orig ); \
2408  volatile unsigned long _argvec[3 + 8]; \
2409  volatile unsigned long _res; \
2410  /* _argvec[0] holds current r2 across the call */ \
2411  _argvec[1] = (unsigned long)_orig.r2; \
2412  _argvec[2] = (unsigned long)_orig.nraddr; \
2413  _argvec[2 + 1] = (unsigned long)arg1; \
2414  _argvec[2 + 2] = (unsigned long)arg2; \
2415  _argvec[2 + 3] = (unsigned long)arg3; \
2416  _argvec[2 + 4] = (unsigned long)arg4; \
2417  _argvec[2 + 5] = (unsigned long)arg5; \
2418  _argvec[2 + 6] = (unsigned long)arg6; \
2419  _argvec[2 + 7] = (unsigned long)arg7; \
2420  _argvec[2 + 8] = (unsigned long)arg8; \
2421  __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2422  "std 2,-16(11)\n\t" /* save tocptr */ \
2423  "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2424  "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2425  "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2426  "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2427  "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2428  "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2429  "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2430  "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2431  "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2432  "ld 11, 0(11)\n\t" /* target->r11 */ \
2433  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2434  "mr %0,3\n\t" \
2435  "ld 2,-16(11)\n\t" /* restore tocptr */ \
2436  VALGRIND_RESTORE_STACK \
2437  : /*out*/ "=r"( _res ) \
2438  : /*in*/ "r"( &_argvec[2] ) \
2439  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
2440  lval = (__typeof__( lval ))_res; \
2441  } while ( 0 )
2442 
2443 #define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
2444  do { \
2445  volatile OrigFn _orig = ( orig ); \
2446  volatile unsigned long _argvec[3 + 9]; \
2447  volatile unsigned long _res; \
2448  /* _argvec[0] holds current r2 across the call */ \
2449  _argvec[1] = (unsigned long)_orig.r2; \
2450  _argvec[2] = (unsigned long)_orig.nraddr; \
2451  _argvec[2 + 1] = (unsigned long)arg1; \
2452  _argvec[2 + 2] = (unsigned long)arg2; \
2453  _argvec[2 + 3] = (unsigned long)arg3; \
2454  _argvec[2 + 4] = (unsigned long)arg4; \
2455  _argvec[2 + 5] = (unsigned long)arg5; \
2456  _argvec[2 + 6] = (unsigned long)arg6; \
2457  _argvec[2 + 7] = (unsigned long)arg7; \
2458  _argvec[2 + 8] = (unsigned long)arg8; \
2459  _argvec[2 + 9] = (unsigned long)arg9; \
2460  __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2461  "std 2,-16(11)\n\t" /* save tocptr */ \
2462  "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2463  "addi 1,1,-128\n\t" /* expand stack frame */ /* arg9 */ \
2464  "ld 3,72(11)\n\t" \
2465  "std 3,112(1)\n\t" /* args1-8 */ \
2466  "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2467  "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2468  "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2469  "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2470  "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2471  "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2472  "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2473  "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2474  "ld 11, 0(11)\n\t" /* target->r11 */ \
2475  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2476  "mr %0,3\n\t" \
2477  "ld 2,-16(11)\n\t" /* restore tocptr */ \
2478  VALGRIND_RESTORE_STACK \
2479  : /*out*/ "=r"( _res ) \
2480  : /*in*/ "r"( &_argvec[2] ) \
2481  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
2482  lval = (__typeof__( lval ))_res; \
2483  } while ( 0 )
2484 
2485 #define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
2486  do { \
2487  volatile OrigFn _orig = ( orig ); \
2488  volatile unsigned long _argvec[3 + 10]; \
2489  volatile unsigned long _res; \
2490  /* _argvec[0] holds current r2 across the call */ \
2491  _argvec[1] = (unsigned long)_orig.r2; \
2492  _argvec[2] = (unsigned long)_orig.nraddr; \
2493  _argvec[2 + 1] = (unsigned long)arg1; \
2494  _argvec[2 + 2] = (unsigned long)arg2; \
2495  _argvec[2 + 3] = (unsigned long)arg3; \
2496  _argvec[2 + 4] = (unsigned long)arg4; \
2497  _argvec[2 + 5] = (unsigned long)arg5; \
2498  _argvec[2 + 6] = (unsigned long)arg6; \
2499  _argvec[2 + 7] = (unsigned long)arg7; \
2500  _argvec[2 + 8] = (unsigned long)arg8; \
2501  _argvec[2 + 9] = (unsigned long)arg9; \
2502  _argvec[2 + 10] = (unsigned long)arg10; \
2503  __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2504  "std 2,-16(11)\n\t" /* save tocptr */ \
2505  "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2506  "addi 1,1,-128\n\t" /* expand stack frame */ /* arg10 */ \
2507  "ld 3,80(11)\n\t" \
2508  "std 3,120(1)\n\t" /* arg9 */ \
2509  "ld 3,72(11)\n\t" \
2510  "std 3,112(1)\n\t" /* args1-8 */ \
2511  "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2512  "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2513  "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2514  "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2515  "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2516  "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2517  "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2518  "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2519  "ld 11, 0(11)\n\t" /* target->r11 */ \
2520  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2521  "mr %0,3\n\t" \
2522  "ld 2,-16(11)\n\t" /* restore tocptr */ \
2523  VALGRIND_RESTORE_STACK \
2524  : /*out*/ "=r"( _res ) \
2525  : /*in*/ "r"( &_argvec[2] ) \
2526  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
2527  lval = (__typeof__( lval ))_res; \
2528  } while ( 0 )
2529 
2530 #define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
2531  do { \
2532  volatile OrigFn _orig = ( orig ); \
2533  volatile unsigned long _argvec[3 + 11]; \
2534  volatile unsigned long _res; \
2535  /* _argvec[0] holds current r2 across the call */ \
2536  _argvec[1] = (unsigned long)_orig.r2; \
2537  _argvec[2] = (unsigned long)_orig.nraddr; \
2538  _argvec[2 + 1] = (unsigned long)arg1; \
2539  _argvec[2 + 2] = (unsigned long)arg2; \
2540  _argvec[2 + 3] = (unsigned long)arg3; \
2541  _argvec[2 + 4] = (unsigned long)arg4; \
2542  _argvec[2 + 5] = (unsigned long)arg5; \
2543  _argvec[2 + 6] = (unsigned long)arg6; \
2544  _argvec[2 + 7] = (unsigned long)arg7; \
2545  _argvec[2 + 8] = (unsigned long)arg8; \
2546  _argvec[2 + 9] = (unsigned long)arg9; \
2547  _argvec[2 + 10] = (unsigned long)arg10; \
2548  _argvec[2 + 11] = (unsigned long)arg11; \
2549  __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2550  "std 2,-16(11)\n\t" /* save tocptr */ \
2551  "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2552  "addi 1,1,-144\n\t" /* expand stack frame */ /* arg11 */ \
2553  "ld 3,88(11)\n\t" \
2554  "std 3,128(1)\n\t" /* arg10 */ \
2555  "ld 3,80(11)\n\t" \
2556  "std 3,120(1)\n\t" /* arg9 */ \
2557  "ld 3,72(11)\n\t" \
2558  "std 3,112(1)\n\t" /* args1-8 */ \
2559  "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2560  "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2561  "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2562  "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2563  "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2564  "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2565  "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2566  "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2567  "ld 11, 0(11)\n\t" /* target->r11 */ \
2568  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2569  "mr %0,3\n\t" \
2570  "ld 2,-16(11)\n\t" /* restore tocptr */ \
2571  VALGRIND_RESTORE_STACK \
2572  : /*out*/ "=r"( _res ) \
2573  : /*in*/ "r"( &_argvec[2] ) \
2574  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
2575  lval = (__typeof__( lval ))_res; \
2576  } while ( 0 )
2577 
2578 #define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
2579  do { \
2580  volatile OrigFn _orig = ( orig ); \
2581  volatile unsigned long _argvec[3 + 12]; \
2582  volatile unsigned long _res; \
2583  /* _argvec[0] holds current r2 across the call */ \
2584  _argvec[1] = (unsigned long)_orig.r2; \
2585  _argvec[2] = (unsigned long)_orig.nraddr; \
2586  _argvec[2 + 1] = (unsigned long)arg1; \
2587  _argvec[2 + 2] = (unsigned long)arg2; \
2588  _argvec[2 + 3] = (unsigned long)arg3; \
2589  _argvec[2 + 4] = (unsigned long)arg4; \
2590  _argvec[2 + 5] = (unsigned long)arg5; \
2591  _argvec[2 + 6] = (unsigned long)arg6; \
2592  _argvec[2 + 7] = (unsigned long)arg7; \
2593  _argvec[2 + 8] = (unsigned long)arg8; \
2594  _argvec[2 + 9] = (unsigned long)arg9; \
2595  _argvec[2 + 10] = (unsigned long)arg10; \
2596  _argvec[2 + 11] = (unsigned long)arg11; \
2597  _argvec[2 + 12] = (unsigned long)arg12; \
2598  __asm__ volatile( VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2599  "std 2,-16(11)\n\t" /* save tocptr */ \
2600  "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2601  "addi 1,1,-144\n\t" /* expand stack frame */ /* arg12 */ \
2602  "ld 3,96(11)\n\t" \
2603  "std 3,136(1)\n\t" /* arg11 */ \
2604  "ld 3,88(11)\n\t" \
2605  "std 3,128(1)\n\t" /* arg10 */ \
2606  "ld 3,80(11)\n\t" \
2607  "std 3,120(1)\n\t" /* arg9 */ \
2608  "ld 3,72(11)\n\t" \
2609  "std 3,112(1)\n\t" /* args1-8 */ \
2610  "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2611  "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2612  "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2613  "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2614  "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2615  "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2616  "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2617  "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2618  "ld 11, 0(11)\n\t" /* target->r11 */ \
2619  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2620  "mr %0,3\n\t" \
2621  "ld 2,-16(11)\n\t" /* restore tocptr */ \
2622  VALGRIND_RESTORE_STACK \
2623  : /*out*/ "=r"( _res ) \
2624  : /*in*/ "r"( &_argvec[2] ) \
2625  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" ); \
2626  lval = (__typeof__( lval ))_res; \
2627  } while ( 0 )
2628 
2629 #endif /* PLAT_ppc64_linux */
2630 
2631 /* ------------------------- arm-linux ------------------------- */
2632 
2633 #if defined( PLAT_arm_linux )
2634 
2635 /* These regs are trashed by the hidden call. */
/* Per the ARM AAPCS: r0-r3 are argument/scratch registers and r14 (lr)
   holds the return address; r4 is additionally trashed because
   VALGRIND_ALIGN_STACK below uses it as a temporary. */
2636 #define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3", "r4", "r14"
2637 
2638 /* Macros to save and align the stack before making a function
2639  call and restore it afterwards as gcc may not keep the stack
2640  pointer aligned if it doesn't realise calls are being made
2641  to other functions. */
2642 
2643 /* This is a bit tricky. We store the original stack pointer in r10
2644  as it is callee-saves. gcc doesn't allow the use of r11 for some
2645  reason. Also, we can't directly "bic" the stack pointer in thumb
2646  mode since r13 isn't an allowed register number in that context.
2647  So use r4 as a temporary, since that is about to get trashed
2648  anyway, just after each use of this macro. Side effect is we need
2649  to be very careful about any future changes, since
2650  VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
/* Save sp in r10, then round sp down to an 8-byte boundary (clear low
   three bits) via r4, as required for AAPCS calls. */
2651 #define VALGRIND_ALIGN_STACK \
2652  "mov r10, sp\n\t" \
2653  "mov r4, sp\n\t" \
2654  "bic r4, r4, #7\n\t" \
2655  "mov sp, r4\n\t"
/* Undo VALGRIND_ALIGN_STACK (and any pushes since): restore sp from r10. */
2656 #define VALGRIND_RESTORE_STACK "mov sp, r10\n\t"
2657 
2658 /* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
2659  long) == 4. */
2660 
/* ---- arm-linux: CALL_FN_W_v .. CALL_FN_W_12W ----
   Each macro calls the redirected function _orig.nraddr with N word-sized
   (32-bit) arguments and assigns the word result to lval.
   _argvec layout: [0] = target address, [i] = arg i (byte offsets #4, #8...).
   Per the AAPCS, args 1-4 go in r0-r3 and the rest are passed on the stack;
   the macros push stack args in reverse order, inserting a "sub sp, sp, #4"
   pad wherever the number of pushed words would otherwise leave sp
   misaligned, since VALGRIND_ALIGN_STACK established 8-byte alignment and
   VALGRIND_RESTORE_STACK pops everything at once.  The target is loaded
   into r4 last (r4 doubles as scratch) and called via the Valgrind
   no-redirect trampoline for r4.  r10 holds the saved sp, hence its place
   in the clobber list. */
/* 0 args. */
2661 #define CALL_FN_W_v( lval, orig ) \
2662  do { \
2663  volatile OrigFn _orig = ( orig ); \
2664  volatile unsigned long _argvec[1]; \
2665  volatile unsigned long _res; \
2666  _argvec[0] = (unsigned long)_orig.nraddr; \
2667  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r4, [%1] \n\t" /* target->r4 */ \
2668  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2669  : /*out*/ "=r"( _res ) \
2670  : /*in*/ "0"( &_argvec[0] ) \
2671  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2672  lval = (__typeof__( lval ))_res; \
2673  } while ( 0 )
2674 
/* 1 arg -> r0. */
2675 #define CALL_FN_W_W( lval, orig, arg1 ) \
2676  do { \
2677  volatile OrigFn _orig = ( orig ); \
2678  volatile unsigned long _argvec[2]; \
2679  volatile unsigned long _res; \
2680  _argvec[0] = (unsigned long)_orig.nraddr; \
2681  _argvec[1] = (unsigned long)( arg1 ); \
2682  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2683  "ldr r4, [%1] \n\t" /* target->r4 */ \
2684  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2685  : /*out*/ "=r"( _res ) \
2686  : /*in*/ "0"( &_argvec[0] ) \
2687  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2688  lval = (__typeof__( lval ))_res; \
2689  } while ( 0 )
2690 
/* 2 args -> r0, r1. */
2691 #define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
2692  do { \
2693  volatile OrigFn _orig = ( orig ); \
2694  volatile unsigned long _argvec[3]; \
2695  volatile unsigned long _res; \
2696  _argvec[0] = (unsigned long)_orig.nraddr; \
2697  _argvec[1] = (unsigned long)( arg1 ); \
2698  _argvec[2] = (unsigned long)( arg2 ); \
2699  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2700  "ldr r1, [%1, #8] \n\t" \
2701  "ldr r4, [%1] \n\t" /* target->r4 */ \
2702  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2703  : /*out*/ "=r"( _res ) \
2704  : /*in*/ "0"( &_argvec[0] ) \
2705  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2706  lval = (__typeof__( lval ))_res; \
2707  } while ( 0 )
2708 
/* 3 args -> r0..r2. */
2709 #define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
2710  do { \
2711  volatile OrigFn _orig = ( orig ); \
2712  volatile unsigned long _argvec[4]; \
2713  volatile unsigned long _res; \
2714  _argvec[0] = (unsigned long)_orig.nraddr; \
2715  _argvec[1] = (unsigned long)( arg1 ); \
2716  _argvec[2] = (unsigned long)( arg2 ); \
2717  _argvec[3] = (unsigned long)( arg3 ); \
2718  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2719  "ldr r1, [%1, #8] \n\t" \
2720  "ldr r2, [%1, #12] \n\t" \
2721  "ldr r4, [%1] \n\t" /* target->r4 */ \
2722  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2723  : /*out*/ "=r"( _res ) \
2724  : /*in*/ "0"( &_argvec[0] ) \
2725  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2726  lval = (__typeof__( lval ))_res; \
2727  } while ( 0 )
2728 
/* 4 args -> r0..r3 (last register arg under the AAPCS). */
2729 #define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
2730  do { \
2731  volatile OrigFn _orig = ( orig ); \
2732  volatile unsigned long _argvec[5]; \
2733  volatile unsigned long _res; \
2734  _argvec[0] = (unsigned long)_orig.nraddr; \
2735  _argvec[1] = (unsigned long)( arg1 ); \
2736  _argvec[2] = (unsigned long)( arg2 ); \
2737  _argvec[3] = (unsigned long)( arg3 ); \
2738  _argvec[4] = (unsigned long)( arg4 ); \
2739  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2740  "ldr r1, [%1, #8] \n\t" \
2741  "ldr r2, [%1, #12] \n\t" \
2742  "ldr r3, [%1, #16] \n\t" \
2743  "ldr r4, [%1] \n\t" /* target->r4 */ \
2744  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2745  : /*out*/ "=r"( _res ) \
2746  : /*in*/ "0"( &_argvec[0] ) \
2747  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2748  lval = (__typeof__( lval ))_res; \
2749  } while ( 0 )
2750 
/* 5 args: arg5 on the stack; the extra "sub sp, #4" pad keeps sp
   8-byte aligned after the single push. */
2751 #define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
2752  do { \
2753  volatile OrigFn _orig = ( orig ); \
2754  volatile unsigned long _argvec[6]; \
2755  volatile unsigned long _res; \
2756  _argvec[0] = (unsigned long)_orig.nraddr; \
2757  _argvec[1] = (unsigned long)( arg1 ); \
2758  _argvec[2] = (unsigned long)( arg2 ); \
2759  _argvec[3] = (unsigned long)( arg3 ); \
2760  _argvec[4] = (unsigned long)( arg4 ); \
2761  _argvec[5] = (unsigned long)( arg5 ); \
2762  __asm__ volatile( VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2763  "ldr r0, [%1, #20] \n\t" \
2764  "push {r0} \n\t" \
2765  "ldr r0, [%1, #4] \n\t" \
2766  "ldr r1, [%1, #8] \n\t" \
2767  "ldr r2, [%1, #12] \n\t" \
2768  "ldr r3, [%1, #16] \n\t" \
2769  "ldr r4, [%1] \n\t" /* target->r4 */ \
2770  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2771  : /*out*/ "=r"( _res ) \
2772  : /*in*/ "0"( &_argvec[0] ) \
2773  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2774  lval = (__typeof__( lval ))_res; \
2775  } while ( 0 )
2776 
/* 6 args: args 5-6 pushed together (two words — already aligned). */
2777 #define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
2778  do { \
2779  volatile OrigFn _orig = ( orig ); \
2780  volatile unsigned long _argvec[7]; \
2781  volatile unsigned long _res; \
2782  _argvec[0] = (unsigned long)_orig.nraddr; \
2783  _argvec[1] = (unsigned long)( arg1 ); \
2784  _argvec[2] = (unsigned long)( arg2 ); \
2785  _argvec[3] = (unsigned long)( arg3 ); \
2786  _argvec[4] = (unsigned long)( arg4 ); \
2787  _argvec[5] = (unsigned long)( arg5 ); \
2788  _argvec[6] = (unsigned long)( arg6 ); \
2789  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #20] \n\t" \
2790  "ldr r1, [%1, #24] \n\t" \
2791  "push {r0, r1} \n\t" \
2792  "ldr r0, [%1, #4] \n\t" \
2793  "ldr r1, [%1, #8] \n\t" \
2794  "ldr r2, [%1, #12] \n\t" \
2795  "ldr r3, [%1, #16] \n\t" \
2796  "ldr r4, [%1] \n\t" /* target->r4 */ \
2797  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2798  : /*out*/ "=r"( _res ) \
2799  : /*in*/ "0"( &_argvec[0] ) \
2800  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2801  lval = (__typeof__( lval ))_res; \
2802  } while ( 0 )
2803 
/* 7 args: pad + 3-word push of args 5-7. */
2804 #define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
2805  do { \
2806  volatile OrigFn _orig = ( orig ); \
2807  volatile unsigned long _argvec[8]; \
2808  volatile unsigned long _res; \
2809  _argvec[0] = (unsigned long)_orig.nraddr; \
2810  _argvec[1] = (unsigned long)( arg1 ); \
2811  _argvec[2] = (unsigned long)( arg2 ); \
2812  _argvec[3] = (unsigned long)( arg3 ); \
2813  _argvec[4] = (unsigned long)( arg4 ); \
2814  _argvec[5] = (unsigned long)( arg5 ); \
2815  _argvec[6] = (unsigned long)( arg6 ); \
2816  _argvec[7] = (unsigned long)( arg7 ); \
2817  __asm__ volatile( VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2818  "ldr r0, [%1, #20] \n\t" \
2819  "ldr r1, [%1, #24] \n\t" \
2820  "ldr r2, [%1, #28] \n\t" \
2821  "push {r0, r1, r2} \n\t" \
2822  "ldr r0, [%1, #4] \n\t" \
2823  "ldr r1, [%1, #8] \n\t" \
2824  "ldr r2, [%1, #12] \n\t" \
2825  "ldr r3, [%1, #16] \n\t" \
2826  "ldr r4, [%1] \n\t" /* target->r4 */ \
2827  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2828  : /*out*/ "=r"( _res ) \
2829  : /*in*/ "0"( &_argvec[0] ) \
2830  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2831  lval = (__typeof__( lval ))_res; \
2832  } while ( 0 )
2833 
/* 8 args: 4-word push of args 5-8 (aligned). */
2834 #define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
2835  do { \
2836  volatile OrigFn _orig = ( orig ); \
2837  volatile unsigned long _argvec[9]; \
2838  volatile unsigned long _res; \
2839  _argvec[0] = (unsigned long)_orig.nraddr; \
2840  _argvec[1] = (unsigned long)( arg1 ); \
2841  _argvec[2] = (unsigned long)( arg2 ); \
2842  _argvec[3] = (unsigned long)( arg3 ); \
2843  _argvec[4] = (unsigned long)( arg4 ); \
2844  _argvec[5] = (unsigned long)( arg5 ); \
2845  _argvec[6] = (unsigned long)( arg6 ); \
2846  _argvec[7] = (unsigned long)( arg7 ); \
2847  _argvec[8] = (unsigned long)( arg8 ); \
2848  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #20] \n\t" \
2849  "ldr r1, [%1, #24] \n\t" \
2850  "ldr r2, [%1, #28] \n\t" \
2851  "ldr r3, [%1, #32] \n\t" \
2852  "push {r0, r1, r2, r3} \n\t" \
2853  "ldr r0, [%1, #4] \n\t" \
2854  "ldr r1, [%1, #8] \n\t" \
2855  "ldr r2, [%1, #12] \n\t" \
2856  "ldr r3, [%1, #16] \n\t" \
2857  "ldr r4, [%1] \n\t" /* target->r4 */ \
2858  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2859  : /*out*/ "=r"( _res ) \
2860  : /*in*/ "0"( &_argvec[0] ) \
2861  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2862  lval = (__typeof__( lval ))_res; \
2863  } while ( 0 )
2864 
/* 9 args: pad + 5-word push of args 5-9 (r4 is reloaded with the
   target afterwards). */
2865 #define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
2866  do { \
2867  volatile OrigFn _orig = ( orig ); \
2868  volatile unsigned long _argvec[10]; \
2869  volatile unsigned long _res; \
2870  _argvec[0] = (unsigned long)_orig.nraddr; \
2871  _argvec[1] = (unsigned long)( arg1 ); \
2872  _argvec[2] = (unsigned long)( arg2 ); \
2873  _argvec[3] = (unsigned long)( arg3 ); \
2874  _argvec[4] = (unsigned long)( arg4 ); \
2875  _argvec[5] = (unsigned long)( arg5 ); \
2876  _argvec[6] = (unsigned long)( arg6 ); \
2877  _argvec[7] = (unsigned long)( arg7 ); \
2878  _argvec[8] = (unsigned long)( arg8 ); \
2879  _argvec[9] = (unsigned long)( arg9 ); \
2880  __asm__ volatile( VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2881  "ldr r0, [%1, #20] \n\t" \
2882  "ldr r1, [%1, #24] \n\t" \
2883  "ldr r2, [%1, #28] \n\t" \
2884  "ldr r3, [%1, #32] \n\t" \
2885  "ldr r4, [%1, #36] \n\t" \
2886  "push {r0, r1, r2, r3, r4} \n\t" \
2887  "ldr r0, [%1, #4] \n\t" \
2888  "ldr r1, [%1, #8] \n\t" \
2889  "ldr r2, [%1, #12] \n\t" \
2890  "ldr r3, [%1, #16] \n\t" \
2891  "ldr r4, [%1] \n\t" /* target->r4 */ \
2892  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2893  : /*out*/ "=r"( _res ) \
2894  : /*in*/ "0"( &_argvec[0] ) \
2895  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2896  lval = (__typeof__( lval ))_res; \
2897  } while ( 0 )
2898 
/* 10 args: arg10 pushed first (highest address), then args 5-9. */
2899 #define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
2900  do { \
2901  volatile OrigFn _orig = ( orig ); \
2902  volatile unsigned long _argvec[11]; \
2903  volatile unsigned long _res; \
2904  _argvec[0] = (unsigned long)_orig.nraddr; \
2905  _argvec[1] = (unsigned long)( arg1 ); \
2906  _argvec[2] = (unsigned long)( arg2 ); \
2907  _argvec[3] = (unsigned long)( arg3 ); \
2908  _argvec[4] = (unsigned long)( arg4 ); \
2909  _argvec[5] = (unsigned long)( arg5 ); \
2910  _argvec[6] = (unsigned long)( arg6 ); \
2911  _argvec[7] = (unsigned long)( arg7 ); \
2912  _argvec[8] = (unsigned long)( arg8 ); \
2913  _argvec[9] = (unsigned long)( arg9 ); \
2914  _argvec[10] = (unsigned long)( arg10 ); \
2915  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #40] \n\t" \
2916  "push {r0} \n\t" \
2917  "ldr r0, [%1, #20] \n\t" \
2918  "ldr r1, [%1, #24] \n\t" \
2919  "ldr r2, [%1, #28] \n\t" \
2920  "ldr r3, [%1, #32] \n\t" \
2921  "ldr r4, [%1, #36] \n\t" \
2922  "push {r0, r1, r2, r3, r4} \n\t" \
2923  "ldr r0, [%1, #4] \n\t" \
2924  "ldr r1, [%1, #8] \n\t" \
2925  "ldr r2, [%1, #12] \n\t" \
2926  "ldr r3, [%1, #16] \n\t" \
2927  "ldr r4, [%1] \n\t" /* target->r4 */ \
2928  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2929  : /*out*/ "=r"( _res ) \
2930  : /*in*/ "0"( &_argvec[0] ) \
2931  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2932  lval = (__typeof__( lval ))_res; \
2933  } while ( 0 )
2934 
/* 11 args: pad, push args 10-11, then args 5-9. */
2935 #define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
2936  do { \
2937  volatile OrigFn _orig = ( orig ); \
2938  volatile unsigned long _argvec[12]; \
2939  volatile unsigned long _res; \
2940  _argvec[0] = (unsigned long)_orig.nraddr; \
2941  _argvec[1] = (unsigned long)( arg1 ); \
2942  _argvec[2] = (unsigned long)( arg2 ); \
2943  _argvec[3] = (unsigned long)( arg3 ); \
2944  _argvec[4] = (unsigned long)( arg4 ); \
2945  _argvec[5] = (unsigned long)( arg5 ); \
2946  _argvec[6] = (unsigned long)( arg6 ); \
2947  _argvec[7] = (unsigned long)( arg7 ); \
2948  _argvec[8] = (unsigned long)( arg8 ); \
2949  _argvec[9] = (unsigned long)( arg9 ); \
2950  _argvec[10] = (unsigned long)( arg10 ); \
2951  _argvec[11] = (unsigned long)( arg11 ); \
2952  __asm__ volatile( VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2953  "ldr r0, [%1, #40] \n\t" \
2954  "ldr r1, [%1, #44] \n\t" \
2955  "push {r0, r1} \n\t" \
2956  "ldr r0, [%1, #20] \n\t" \
2957  "ldr r1, [%1, #24] \n\t" \
2958  "ldr r2, [%1, #28] \n\t" \
2959  "ldr r3, [%1, #32] \n\t" \
2960  "ldr r4, [%1, #36] \n\t" \
2961  "push {r0, r1, r2, r3, r4} \n\t" \
2962  "ldr r0, [%1, #4] \n\t" \
2963  "ldr r1, [%1, #8] \n\t" \
2964  "ldr r2, [%1, #12] \n\t" \
2965  "ldr r3, [%1, #16] \n\t" \
2966  "ldr r4, [%1] \n\t" /* target->r4 */ \
2967  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2968  : /*out*/ "=r"( _res ) \
2969  : /*in*/ "0"( &_argvec[0] ) \
2970  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
2971  lval = (__typeof__( lval ))_res; \
2972  } while ( 0 )
2973 
/* 12 args: push args 10-12, then args 5-9 (8 words total — aligned). */
2974 #define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
2975  do { \
2976  volatile OrigFn _orig = ( orig ); \
2977  volatile unsigned long _argvec[13]; \
2978  volatile unsigned long _res; \
2979  _argvec[0] = (unsigned long)_orig.nraddr; \
2980  _argvec[1] = (unsigned long)( arg1 ); \
2981  _argvec[2] = (unsigned long)( arg2 ); \
2982  _argvec[3] = (unsigned long)( arg3 ); \
2983  _argvec[4] = (unsigned long)( arg4 ); \
2984  _argvec[5] = (unsigned long)( arg5 ); \
2985  _argvec[6] = (unsigned long)( arg6 ); \
2986  _argvec[7] = (unsigned long)( arg7 ); \
2987  _argvec[8] = (unsigned long)( arg8 ); \
2988  _argvec[9] = (unsigned long)( arg9 ); \
2989  _argvec[10] = (unsigned long)( arg10 ); \
2990  _argvec[11] = (unsigned long)( arg11 ); \
2991  _argvec[12] = (unsigned long)( arg12 ); \
2992  __asm__ volatile( VALGRIND_ALIGN_STACK "ldr r0, [%1, #40] \n\t" \
2993  "ldr r1, [%1, #44] \n\t" \
2994  "ldr r2, [%1, #48] \n\t" \
2995  "push {r0, r1, r2} \n\t" \
2996  "ldr r0, [%1, #20] \n\t" \
2997  "ldr r1, [%1, #24] \n\t" \
2998  "ldr r2, [%1, #28] \n\t" \
2999  "ldr r3, [%1, #32] \n\t" \
3000  "ldr r4, [%1, #36] \n\t" \
3001  "push {r0, r1, r2, r3, r4} \n\t" \
3002  "ldr r0, [%1, #4] \n\t" \
3003  "ldr r1, [%1, #8] \n\t" \
3004  "ldr r2, [%1, #12] \n\t" \
3005  "ldr r3, [%1, #16] \n\t" \
3006  "ldr r4, [%1] \n\t" /* target->r4 */ \
3007  VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
3008  : /*out*/ "=r"( _res ) \
3009  : /*in*/ "0"( &_argvec[0] ) \
3010  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" ); \
3011  lval = (__typeof__( lval ))_res; \
3012  } while ( 0 )
3013 
3014 #endif /* PLAT_arm_linux */
3015 
3016 /* ------------------------- s390x-linux ------------------------- */
3017 
3018 #if defined( PLAT_s390x_linux )
3019 
3020 /* Similar workaround as amd64 (see above), but we use r11 as frame
3021  pointer and save the old r11 in r7. r11 might be used for
3022  argvec, therefore we copy argvec in r1 since r1 is clobbered
3023  after the call anyway. */
/* When the compiler emits DWARF CFI in asm, wrap the hidden call with
   .cfi directives so unwinding through it works: the CFA is temporarily
   redefined to r11 (loaded from %2 = __builtin_dwarf_cfa() via
   __FRAME_POINTER) and restored afterwards.  Without CFI support only the
   argvec copy into r1 remains. */
3024 #if defined( __GNUC__ ) && defined( __GCC_HAVE_DWARF2_CFI_ASM )
3025 #define __FRAME_POINTER , "d"( __builtin_dwarf_cfa() )
3026 #define VALGRIND_CFI_PROLOGUE \
3027  ".cfi_remember_state\n\t" \
3028  "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
3029  "lgr 7,11\n\t" \
3030  "lgr 11,%2\n\t" \
3031  ".cfi_def_cfa r11, 0\n\t"
3032 #define VALGRIND_CFI_EPILOGUE \
3033  "lgr 11, 7\n\t" \
3034  ".cfi_restore_state\n\t"
3035 #else
3036 #define __FRAME_POINTER
3037 #define VALGRIND_CFI_PROLOGUE "lgr 1,%1\n\t"
3038 #define VALGRIND_CFI_EPILOGUE
3039 #endif
3040 
3041 /* Nb: On s390 the stack pointer is properly aligned *at all times*
3042  according to the s390 GCC maintainer. (The ABI specification is not
3043  precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
3044  VALGRIND_RESTORE_STACK are not defined here. */
3045 
3046 /* These regs are trashed by the hidden call. Note that we overwrite
3047  r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
3048  function a proper return address. All others are ABI defined call
3049  clobbers. */
3050 #define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7"
3051 
3052 /* Nb: Although r11 is modified in the asm snippets below (inside
3053  VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
3054  two reasons:
3055  (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
3056  modified
3057  (2) GCC will complain that r11 cannot appear inside a clobber section,
3058  when compiled with -O -fno-omit-frame-pointer
3059  */
3060 
/* ---- s390x-linux: CALL_FN_W_v .. CALL_FN_W_WWWW ----
   Each macro calls the redirected function _orig.nraddr with N 64-bit
   arguments and assigns the result to lval.  _argvec[0] = target address,
   _argvec[i] = arg i.  VALGRIND_CFI_PROLOGUE copies the argvec pointer
   into r1; "aghi 15,-160" reserves the 160-byte register save area the
   s390x ELF ABI requires below the stack pointer.  Args 1-5 are loaded
   from 8(r1)..40(r1) into r2-r6, then the target is loaded into r1 itself
   (last, since that overwrites the argvec pointer) and called via the
   Valgrind no-redirect sequence for r1; the result comes back in r2.
   r7 is clobbered because the CFI prologue parks the old r11 there. */
/* 0 args.  NOTE(review): this variant uses the "d" input constraint while
   the N-arg variants below use "a" — mirrors upstream valgrind.h; confirm
   before "tidying". */
3061 #define CALL_FN_W_v( lval, orig ) \
3062  do { \
3063  volatile OrigFn _orig = ( orig ); \
3064  volatile unsigned long _argvec[1]; \
3065  volatile unsigned long _res; \
3066  _argvec[0] = (unsigned long)_orig.nraddr; \
3067  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3068  "lg 1, 0(1)\n\t" /* target->r1 */ \
3069  VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3070  "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3071  : /*out*/ "=d"( _res ) \
3072  : /*in*/ "d"(&_argvec[0])__FRAME_POINTER \
3073  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7" ); \
3074  lval = (__typeof__( lval ))_res; \
3075  } while ( 0 )
3076 
3077 /* The call abi has the arguments in r2-r6 and stack */
/* 1 arg -> r2. */
3078 #define CALL_FN_W_W( lval, orig, arg1 ) \
3079  do { \
3080  volatile OrigFn _orig = ( orig ); \
3081  volatile unsigned long _argvec[2]; \
3082  volatile unsigned long _res; \
3083  _argvec[0] = (unsigned long)_orig.nraddr; \
3084  _argvec[1] = (unsigned long)arg1; \
3085  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3086  "lg 2, 8(1)\n\t" \
3087  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3088  "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3089  : /*out*/ "=d"( _res ) \
3090  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3091  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7" ); \
3092  lval = (__typeof__( lval ))_res; \
3093  } while ( 0 )
3094 
/* 2 args -> r2, r3. */
3095 #define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
3096  do { \
3097  volatile OrigFn _orig = ( orig ); \
3098  volatile unsigned long _argvec[3]; \
3099  volatile unsigned long _res; \
3100  _argvec[0] = (unsigned long)_orig.nraddr; \
3101  _argvec[1] = (unsigned long)arg1; \
3102  _argvec[2] = (unsigned long)arg2; \
3103  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3104  "lg 2, 8(1)\n\t" \
3105  "lg 3,16(1)\n\t" \
3106  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3107  "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3108  : /*out*/ "=d"( _res ) \
3109  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3110  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7" ); \
3111  lval = (__typeof__( lval ))_res; \
3112  } while ( 0 )
3113 
/* 3 args -> r2..r4. */
3114 #define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
3115  do { \
3116  volatile OrigFn _orig = ( orig ); \
3117  volatile unsigned long _argvec[4]; \
3118  volatile unsigned long _res; \
3119  _argvec[0] = (unsigned long)_orig.nraddr; \
3120  _argvec[1] = (unsigned long)arg1; \
3121  _argvec[2] = (unsigned long)arg2; \
3122  _argvec[3] = (unsigned long)arg3; \
3123  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3124  "lg 2, 8(1)\n\t" \
3125  "lg 3,16(1)\n\t" \
3126  "lg 4,24(1)\n\t" \
3127  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3128  "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3129  : /*out*/ "=d"( _res ) \
3130  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3131  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7" ); \
3132  lval = (__typeof__( lval ))_res; \
3133  } while ( 0 )
3134 
/* 4 args -> r2..r5. */
3135 #define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
3136  do { \
3137  volatile OrigFn _orig = ( orig ); \
3138  volatile unsigned long _argvec[5]; \
3139  volatile unsigned long _res; \
3140  _argvec[0] = (unsigned long)_orig.nraddr; \
3141  _argvec[1] = (unsigned long)arg1; \
3142  _argvec[2] = (unsigned long)arg2; \
3143  _argvec[3] = (unsigned long)arg3; \
3144  _argvec[4] = (unsigned long)arg4; \
3145  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3146  "lg 2, 8(1)\n\t" \
3147  "lg 3,16(1)\n\t" \
3148  "lg 4,24(1)\n\t" \
3149  "lg 5,32(1)\n\t" \
3150  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3151  "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3152  : /*out*/ "=d"( _res ) \
3153  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3154  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7" ); \
3155  lval = (__typeof__( lval ))_res; \
3156  } while ( 0 )
3157 
3158 #define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
3159  do { \
3160  volatile OrigFn _orig = ( orig ); \
3161  volatile unsigned long _argvec[6]; \
3162  volatile unsigned long _res; \
3163  _argvec[0] = (unsigned long)_orig.nraddr; \
3164  _argvec[1] = (unsigned long)arg1; \
3165  _argvec[2] = (unsigned long)arg2; \
3166  _argvec[3] = (unsigned long)arg3; \
3167  _argvec[4] = (unsigned long)arg4; \
3168  _argvec[5] = (unsigned long)arg5; \
3169  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3170  "lg 2, 8(1)\n\t" \
3171  "lg 3,16(1)\n\t" \
3172  "lg 4,24(1)\n\t" \
3173  "lg 5,32(1)\n\t" \
3174  "lg 6,40(1)\n\t" \
3175  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3176  "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3177  : /*out*/ "=d"( _res ) \
3178  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3179  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3180  lval = (__typeof__( lval ))_res; \
3181  } while ( 0 )
3182 
3183 #define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
3184  do { \
3185  volatile OrigFn _orig = ( orig ); \
3186  volatile unsigned long _argvec[7]; \
3187  volatile unsigned long _res; \
3188  _argvec[0] = (unsigned long)_orig.nraddr; \
3189  _argvec[1] = (unsigned long)arg1; \
3190  _argvec[2] = (unsigned long)arg2; \
3191  _argvec[3] = (unsigned long)arg3; \
3192  _argvec[4] = (unsigned long)arg4; \
3193  _argvec[5] = (unsigned long)arg5; \
3194  _argvec[6] = (unsigned long)arg6; \
3195  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-168\n\t" \
3196  "lg 2, 8(1)\n\t" \
3197  "lg 3,16(1)\n\t" \
3198  "lg 4,24(1)\n\t" \
3199  "lg 5,32(1)\n\t" \
3200  "lg 6,40(1)\n\t" \
3201  "mvc 160(8,15), 48(1)\n\t" \
3202  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3203  "aghi 15,168\n\t" VALGRIND_CFI_EPILOGUE \
3204  : /*out*/ "=d"( _res ) \
3205  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3206  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3207  lval = (__typeof__( lval ))_res; \
3208  } while ( 0 )
3209 
3210 #define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
3211  do { \
3212  volatile OrigFn _orig = ( orig ); \
3213  volatile unsigned long _argvec[8]; \
3214  volatile unsigned long _res; \
3215  _argvec[0] = (unsigned long)_orig.nraddr; \
3216  _argvec[1] = (unsigned long)arg1; \
3217  _argvec[2] = (unsigned long)arg2; \
3218  _argvec[3] = (unsigned long)arg3; \
3219  _argvec[4] = (unsigned long)arg4; \
3220  _argvec[5] = (unsigned long)arg5; \
3221  _argvec[6] = (unsigned long)arg6; \
3222  _argvec[7] = (unsigned long)arg7; \
3223  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-176\n\t" \
3224  "lg 2, 8(1)\n\t" \
3225  "lg 3,16(1)\n\t" \
3226  "lg 4,24(1)\n\t" \
3227  "lg 5,32(1)\n\t" \
3228  "lg 6,40(1)\n\t" \
3229  "mvc 160(8,15), 48(1)\n\t" \
3230  "mvc 168(8,15), 56(1)\n\t" \
3231  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3232  "aghi 15,176\n\t" VALGRIND_CFI_EPILOGUE \
3233  : /*out*/ "=d"( _res ) \
3234  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3235  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3236  lval = (__typeof__( lval ))_res; \
3237  } while ( 0 )
3238 
3239 #define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
3240  do { \
3241  volatile OrigFn _orig = ( orig ); \
3242  volatile unsigned long _argvec[9]; \
3243  volatile unsigned long _res; \
3244  _argvec[0] = (unsigned long)_orig.nraddr; \
3245  _argvec[1] = (unsigned long)arg1; \
3246  _argvec[2] = (unsigned long)arg2; \
3247  _argvec[3] = (unsigned long)arg3; \
3248  _argvec[4] = (unsigned long)arg4; \
3249  _argvec[5] = (unsigned long)arg5; \
3250  _argvec[6] = (unsigned long)arg6; \
3251  _argvec[7] = (unsigned long)arg7; \
3252  _argvec[8] = (unsigned long)arg8; \
3253  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-184\n\t" \
3254  "lg 2, 8(1)\n\t" \
3255  "lg 3,16(1)\n\t" \
3256  "lg 4,24(1)\n\t" \
3257  "lg 5,32(1)\n\t" \
3258  "lg 6,40(1)\n\t" \
3259  "mvc 160(8,15), 48(1)\n\t" \
3260  "mvc 168(8,15), 56(1)\n\t" \
3261  "mvc 176(8,15), 64(1)\n\t" \
3262  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3263  "aghi 15,184\n\t" VALGRIND_CFI_EPILOGUE \
3264  : /*out*/ "=d"( _res ) \
3265  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3266  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3267  lval = (__typeof__( lval ))_res; \
3268  } while ( 0 )
3269 
3270 #define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
3271  do { \
3272  volatile OrigFn _orig = ( orig ); \
3273  volatile unsigned long _argvec[10]; \
3274  volatile unsigned long _res; \
3275  _argvec[0] = (unsigned long)_orig.nraddr; \
3276  _argvec[1] = (unsigned long)arg1; \
3277  _argvec[2] = (unsigned long)arg2; \
3278  _argvec[3] = (unsigned long)arg3; \
3279  _argvec[4] = (unsigned long)arg4; \
3280  _argvec[5] = (unsigned long)arg5; \
3281  _argvec[6] = (unsigned long)arg6; \
3282  _argvec[7] = (unsigned long)arg7; \
3283  _argvec[8] = (unsigned long)arg8; \
3284  _argvec[9] = (unsigned long)arg9; \
3285  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-192\n\t" \
3286  "lg 2, 8(1)\n\t" \
3287  "lg 3,16(1)\n\t" \
3288  "lg 4,24(1)\n\t" \
3289  "lg 5,32(1)\n\t" \
3290  "lg 6,40(1)\n\t" \
3291  "mvc 160(8,15), 48(1)\n\t" \
3292  "mvc 168(8,15), 56(1)\n\t" \
3293  "mvc 176(8,15), 64(1)\n\t" \
3294  "mvc 184(8,15), 72(1)\n\t" \
3295  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3296  "aghi 15,192\n\t" VALGRIND_CFI_EPILOGUE \
3297  : /*out*/ "=d"( _res ) \
3298  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3299  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3300  lval = (__typeof__( lval ))_res; \
3301  } while ( 0 )
3302 
3303 #define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
3304  do { \
3305  volatile OrigFn _orig = ( orig ); \
3306  volatile unsigned long _argvec[11]; \
3307  volatile unsigned long _res; \
3308  _argvec[0] = (unsigned long)_orig.nraddr; \
3309  _argvec[1] = (unsigned long)arg1; \
3310  _argvec[2] = (unsigned long)arg2; \
3311  _argvec[3] = (unsigned long)arg3; \
3312  _argvec[4] = (unsigned long)arg4; \
3313  _argvec[5] = (unsigned long)arg5; \
3314  _argvec[6] = (unsigned long)arg6; \
3315  _argvec[7] = (unsigned long)arg7; \
3316  _argvec[8] = (unsigned long)arg8; \
3317  _argvec[9] = (unsigned long)arg9; \
3318  _argvec[10] = (unsigned long)arg10; \
3319  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-200\n\t" \
3320  "lg 2, 8(1)\n\t" \
3321  "lg 3,16(1)\n\t" \
3322  "lg 4,24(1)\n\t" \
3323  "lg 5,32(1)\n\t" \
3324  "lg 6,40(1)\n\t" \
3325  "mvc 160(8,15), 48(1)\n\t" \
3326  "mvc 168(8,15), 56(1)\n\t" \
3327  "mvc 176(8,15), 64(1)\n\t" \
3328  "mvc 184(8,15), 72(1)\n\t" \
3329  "mvc 192(8,15), 80(1)\n\t" \
3330  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3331  "aghi 15,200\n\t" VALGRIND_CFI_EPILOGUE \
3332  : /*out*/ "=d"( _res ) \
3333  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3334  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3335  lval = (__typeof__( lval ))_res; \
3336  } while ( 0 )
3337 
3338 #define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
3339  do { \
3340  volatile OrigFn _orig = ( orig ); \
3341  volatile unsigned long _argvec[12]; \
3342  volatile unsigned long _res; \
3343  _argvec[0] = (unsigned long)_orig.nraddr; \
3344  _argvec[1] = (unsigned long)arg1; \
3345  _argvec[2] = (unsigned long)arg2; \
3346  _argvec[3] = (unsigned long)arg3; \
3347  _argvec[4] = (unsigned long)arg4; \
3348  _argvec[5] = (unsigned long)arg5; \
3349  _argvec[6] = (unsigned long)arg6; \
3350  _argvec[7] = (unsigned long)arg7; \
3351  _argvec[8] = (unsigned long)arg8; \
3352  _argvec[9] = (unsigned long)arg9; \
3353  _argvec[10] = (unsigned long)arg10; \
3354  _argvec[11] = (unsigned long)arg11; \
3355  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-208\n\t" \
3356  "lg 2, 8(1)\n\t" \
3357  "lg 3,16(1)\n\t" \
3358  "lg 4,24(1)\n\t" \
3359  "lg 5,32(1)\n\t" \
3360  "lg 6,40(1)\n\t" \
3361  "mvc 160(8,15), 48(1)\n\t" \
3362  "mvc 168(8,15), 56(1)\n\t" \
3363  "mvc 176(8,15), 64(1)\n\t" \
3364  "mvc 184(8,15), 72(1)\n\t" \
3365  "mvc 192(8,15), 80(1)\n\t" \
3366  "mvc 200(8,15), 88(1)\n\t" \
3367  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3368  "aghi 15,208\n\t" VALGRIND_CFI_EPILOGUE \
3369  : /*out*/ "=d"( _res ) \
3370  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3371  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3372  lval = (__typeof__( lval ))_res; \
3373  } while ( 0 )
3374 
3375 #define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
3376  do { \
3377  volatile OrigFn _orig = ( orig ); \
3378  volatile unsigned long _argvec[13]; \
3379  volatile unsigned long _res; \
3380  _argvec[0] = (unsigned long)_orig.nraddr; \
3381  _argvec[1] = (unsigned long)arg1; \
3382  _argvec[2] = (unsigned long)arg2; \
3383  _argvec[3] = (unsigned long)arg3; \
3384  _argvec[4] = (unsigned long)arg4; \
3385  _argvec[5] = (unsigned long)arg5; \
3386  _argvec[6] = (unsigned long)arg6; \
3387  _argvec[7] = (unsigned long)arg7; \
3388  _argvec[8] = (unsigned long)arg8; \
3389  _argvec[9] = (unsigned long)arg9; \
3390  _argvec[10] = (unsigned long)arg10; \
3391  _argvec[11] = (unsigned long)arg11; \
3392  _argvec[12] = (unsigned long)arg12; \
3393  __asm__ volatile( VALGRIND_CFI_PROLOGUE "aghi 15,-216\n\t" \
3394  "lg 2, 8(1)\n\t" \
3395  "lg 3,16(1)\n\t" \
3396  "lg 4,24(1)\n\t" \
3397  "lg 5,32(1)\n\t" \
3398  "lg 6,40(1)\n\t" \
3399  "mvc 160(8,15), 48(1)\n\t" \
3400  "mvc 168(8,15), 56(1)\n\t" \
3401  "mvc 176(8,15), 64(1)\n\t" \
3402  "mvc 184(8,15), 72(1)\n\t" \
3403  "mvc 192(8,15), 80(1)\n\t" \
3404  "mvc 200(8,15), 88(1)\n\t" \
3405  "mvc 208(8,15), 96(1)\n\t" \
3406  "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3407  "aghi 15,216\n\t" VALGRIND_CFI_EPILOGUE \
3408  : /*out*/ "=d"( _res ) \
3409  : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3410  : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7" ); \
3411  lval = (__typeof__( lval ))_res; \
3412  } while ( 0 )
3413 
3414 #endif /* PLAT_s390x_linux */
3415 
3416 /* ------------------------- mips32-linux ----------------------- */
3417 
3418 #if defined( PLAT_mips32_linux )
3419 
3420 /* These regs are trashed by the hidden call. */
/* o32 caller-saved set: $2-$3 (v0,v1), $4-$7 (a0-a3), $8-$15 (t0-t7),
   $24-$25 (t8,t9) and $31 (ra). */
3421 #define __CALLER_SAVED_REGS \
3422  "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "$31"
3423 
3424 /* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
3425  long) == 4. */
3426 
/* mips32-linux (o32): call _orig.nraddr, bypassing redirection.
   The first four word args go in $4..$7 (a0-a3); args 5+ are staged,
   one at a time via $4 as scratch, into the outgoing stack area at
   16($29) upward.  $28 (gp) and $31 (ra) are saved below the frame and
   restored after the call.  The callee address must be in $25 (t9) for
   VALGRIND_CALL_NOREDIR_T9; the result is returned in $2 (v0). */
/* 0 args. */
3427 #define CALL_FN_W_v( lval, orig ) \
3428  do { \
3429  volatile OrigFn _orig = ( orig ); \
3430  volatile unsigned long _argvec[1]; \
3431  volatile unsigned long _res; \
3432  _argvec[0] = (unsigned long)_orig.nraddr; \
3433  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3434  "sw $28, 0($29) \n\t" \
3435  "sw $31, 4($29) \n\t" \
3436  "subu $29, $29, 16 \n\t" \
3437  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3438  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16\n\t" \
3439  "lw $28, 0($29) \n\t" \
3440  "lw $31, 4($29) \n\t" \
3441  "addu $29, $29, 8 \n\t" \
3442  "move %0, $2\n" \
3443  : /*out*/ "=r"( _res ) \
3444  : /*in*/ "0"( &_argvec[0] ) \
3445  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3446  lval = (__typeof__( lval ))_res; \
3447  } while ( 0 )
3448 
/* 1 arg: a0. */
3449 #define CALL_FN_W_W( lval, orig, arg1 ) \
3450  do { \
3451  volatile OrigFn _orig = ( orig ); \
3452  volatile unsigned long _argvec[2]; \
3453  volatile unsigned long _res; \
3454  _argvec[0] = (unsigned long)_orig.nraddr; \
3455  _argvec[1] = (unsigned long)( arg1 ); \
3456  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3457  "sw $28, 0($29) \n\t" \
3458  "sw $31, 4($29) \n\t" \
3459  "subu $29, $29, 16 \n\t" \
3460  "lw $4, 4(%1) \n\t" /* arg1*/ \
3461  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3462  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3463  "lw $28, 0($29) \n\t" \
3464  "lw $31, 4($29) \n\t" \
3465  "addu $29, $29, 8 \n\t" \
3466  "move %0, $2\n" \
3467  : /*out*/ "=r"( _res ) \
3468  : /*in*/ "0"( &_argvec[0] ) \
3469  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3470  lval = (__typeof__( lval ))_res; \
3471  } while ( 0 )
3472 
/* 2 args: a0,a1. */
3473 #define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
3474  do { \
3475  volatile OrigFn _orig = ( orig ); \
3476  volatile unsigned long _argvec[3]; \
3477  volatile unsigned long _res; \
3478  _argvec[0] = (unsigned long)_orig.nraddr; \
3479  _argvec[1] = (unsigned long)( arg1 ); \
3480  _argvec[2] = (unsigned long)( arg2 ); \
3481  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3482  "sw $28, 0($29) \n\t" \
3483  "sw $31, 4($29) \n\t" \
3484  "subu $29, $29, 16 \n\t" \
3485  "lw $4, 4(%1) \n\t" \
3486  "lw $5, 8(%1) \n\t" \
3487  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3488  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3489  "lw $28, 0($29) \n\t" \
3490  "lw $31, 4($29) \n\t" \
3491  "addu $29, $29, 8 \n\t" \
3492  "move %0, $2\n" \
3493  : /*out*/ "=r"( _res ) \
3494  : /*in*/ "0"( &_argvec[0] ) \
3495  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3496  lval = (__typeof__( lval ))_res; \
3497  } while ( 0 )
3498 
/* 3 args: a0..a2. */
3499 #define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
3500  do { \
3501  volatile OrigFn _orig = ( orig ); \
3502  volatile unsigned long _argvec[4]; \
3503  volatile unsigned long _res; \
3504  _argvec[0] = (unsigned long)_orig.nraddr; \
3505  _argvec[1] = (unsigned long)( arg1 ); \
3506  _argvec[2] = (unsigned long)( arg2 ); \
3507  _argvec[3] = (unsigned long)( arg3 ); \
3508  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3509  "sw $28, 0($29) \n\t" \
3510  "sw $31, 4($29) \n\t" \
3511  "subu $29, $29, 16 \n\t" \
3512  "lw $4, 4(%1) \n\t" \
3513  "lw $5, 8(%1) \n\t" \
3514  "lw $6, 12(%1) \n\t" \
3515  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3516  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3517  "lw $28, 0($29) \n\t" \
3518  "lw $31, 4($29) \n\t" \
3519  "addu $29, $29, 8 \n\t" \
3520  "move %0, $2\n" \
3521  : /*out*/ "=r"( _res ) \
3522  : /*in*/ "0"( &_argvec[0] ) \
3523  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3524  lval = (__typeof__( lval ))_res; \
3525  } while ( 0 )
3526 
/* 4 args: a0..a3. */
3527 #define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
3528  do { \
3529  volatile OrigFn _orig = ( orig ); \
3530  volatile unsigned long _argvec[5]; \
3531  volatile unsigned long _res; \
3532  _argvec[0] = (unsigned long)_orig.nraddr; \
3533  _argvec[1] = (unsigned long)( arg1 ); \
3534  _argvec[2] = (unsigned long)( arg2 ); \
3535  _argvec[3] = (unsigned long)( arg3 ); \
3536  _argvec[4] = (unsigned long)( arg4 ); \
3537  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3538  "sw $28, 0($29) \n\t" \
3539  "sw $31, 4($29) \n\t" \
3540  "subu $29, $29, 16 \n\t" \
3541  "lw $4, 4(%1) \n\t" \
3542  "lw $5, 8(%1) \n\t" \
3543  "lw $6, 12(%1) \n\t" \
3544  "lw $7, 16(%1) \n\t" \
3545  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3546  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3547  "lw $28, 0($29) \n\t" \
3548  "lw $31, 4($29) \n\t" \
3549  "addu $29, $29, 8 \n\t" \
3550  "move %0, $2\n" \
3551  : /*out*/ "=r"( _res ) \
3552  : /*in*/ "0"( &_argvec[0] ) \
3553  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3554  lval = (__typeof__( lval ))_res; \
3555  } while ( 0 )
3556 
/* 5 args: a0..a3 + one stack slot at 16($29); 24-byte outgoing area. */
3557 #define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
3558  do { \
3559  volatile OrigFn _orig = ( orig ); \
3560  volatile unsigned long _argvec[6]; \
3561  volatile unsigned long _res; \
3562  _argvec[0] = (unsigned long)_orig.nraddr; \
3563  _argvec[1] = (unsigned long)( arg1 ); \
3564  _argvec[2] = (unsigned long)( arg2 ); \
3565  _argvec[3] = (unsigned long)( arg3 ); \
3566  _argvec[4] = (unsigned long)( arg4 ); \
3567  _argvec[5] = (unsigned long)( arg5 ); \
3568  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3569  "sw $28, 0($29) \n\t" \
3570  "sw $31, 4($29) \n\t" \
3571  "lw $4, 20(%1) \n\t" \
3572  "subu $29, $29, 24\n\t" \
3573  "sw $4, 16($29) \n\t" \
3574  "lw $4, 4(%1) \n\t" \
3575  "lw $5, 8(%1) \n\t" \
3576  "lw $6, 12(%1) \n\t" \
3577  "lw $7, 16(%1) \n\t" \
3578  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3579  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 24 \n\t" \
3580  "lw $28, 0($29) \n\t" \
3581  "lw $31, 4($29) \n\t" \
3582  "addu $29, $29, 8 \n\t" \
3583  "move %0, $2\n" \
3584  : /*out*/ "=r"( _res ) \
3585  : /*in*/ "0"( &_argvec[0] ) \
3586  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3587  lval = (__typeof__( lval ))_res; \
3588  } while ( 0 )
/* 6 args: a0..a3 + two stack slots; 32-byte outgoing area.
   NOTE(review): the lone "nop" between the lw/sw pair looks like a
   load-delay-slot filler; the sibling macros omit it — confirm against
   upstream valgrind.h before removing. */
3589 #define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
3590  do { \
3591  volatile OrigFn _orig = ( orig ); \
3592  volatile unsigned long _argvec[7]; \
3593  volatile unsigned long _res; \
3594  _argvec[0] = (unsigned long)_orig.nraddr; \
3595  _argvec[1] = (unsigned long)( arg1 ); \
3596  _argvec[2] = (unsigned long)( arg2 ); \
3597  _argvec[3] = (unsigned long)( arg3 ); \
3598  _argvec[4] = (unsigned long)( arg4 ); \
3599  _argvec[5] = (unsigned long)( arg5 ); \
3600  _argvec[6] = (unsigned long)( arg6 ); \
3601  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3602  "sw $28, 0($29) \n\t" \
3603  "sw $31, 4($29) \n\t" \
3604  "lw $4, 20(%1) \n\t" \
3605  "subu $29, $29, 32\n\t" \
3606  "sw $4, 16($29) \n\t" \
3607  "lw $4, 24(%1) \n\t" \
3608  "nop\n\t" \
3609  "sw $4, 20($29) \n\t" \
3610  "lw $4, 4(%1) \n\t" \
3611  "lw $5, 8(%1) \n\t" \
3612  "lw $6, 12(%1) \n\t" \
3613  "lw $7, 16(%1) \n\t" \
3614  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3615  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 32 \n\t" \
3616  "lw $28, 0($29) \n\t" \
3617  "lw $31, 4($29) \n\t" \
3618  "addu $29, $29, 8 \n\t" \
3619  "move %0, $2\n" \
3620  : /*out*/ "=r"( _res ) \
3621  : /*in*/ "0"( &_argvec[0] ) \
3622  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3623  lval = (__typeof__( lval ))_res; \
3624  } while ( 0 )
3625 
/* 7 args: a0..a3 + three stack slots; 32-byte outgoing area. */
3626 #define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
3627  do { \
3628  volatile OrigFn _orig = ( orig ); \
3629  volatile unsigned long _argvec[8]; \
3630  volatile unsigned long _res; \
3631  _argvec[0] = (unsigned long)_orig.nraddr; \
3632  _argvec[1] = (unsigned long)( arg1 ); \
3633  _argvec[2] = (unsigned long)( arg2 ); \
3634  _argvec[3] = (unsigned long)( arg3 ); \
3635  _argvec[4] = (unsigned long)( arg4 ); \
3636  _argvec[5] = (unsigned long)( arg5 ); \
3637  _argvec[6] = (unsigned long)( arg6 ); \
3638  _argvec[7] = (unsigned long)( arg7 ); \
3639  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3640  "sw $28, 0($29) \n\t" \
3641  "sw $31, 4($29) \n\t" \
3642  "lw $4, 20(%1) \n\t" \
3643  "subu $29, $29, 32\n\t" \
3644  "sw $4, 16($29) \n\t" \
3645  "lw $4, 24(%1) \n\t" \
3646  "sw $4, 20($29) \n\t" \
3647  "lw $4, 28(%1) \n\t" \
3648  "sw $4, 24($29) \n\t" \
3649  "lw $4, 4(%1) \n\t" \
3650  "lw $5, 8(%1) \n\t" \
3651  "lw $6, 12(%1) \n\t" \
3652  "lw $7, 16(%1) \n\t" \
3653  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3654  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 32 \n\t" \
3655  "lw $28, 0($29) \n\t" \
3656  "lw $31, 4($29) \n\t" \
3657  "addu $29, $29, 8 \n\t" \
3658  "move %0, $2\n" \
3659  : /*out*/ "=r"( _res ) \
3660  : /*in*/ "0"( &_argvec[0] ) \
3661  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3662  lval = (__typeof__( lval ))_res; \
3663  } while ( 0 )
3664 
/* 8 args: a0..a3 + four stack slots; 40-byte outgoing area. */
3665 #define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
3666  do { \
3667  volatile OrigFn _orig = ( orig ); \
3668  volatile unsigned long _argvec[9]; \
3669  volatile unsigned long _res; \
3670  _argvec[0] = (unsigned long)_orig.nraddr; \
3671  _argvec[1] = (unsigned long)( arg1 ); \
3672  _argvec[2] = (unsigned long)( arg2 ); \
3673  _argvec[3] = (unsigned long)( arg3 ); \
3674  _argvec[4] = (unsigned long)( arg4 ); \
3675  _argvec[5] = (unsigned long)( arg5 ); \
3676  _argvec[6] = (unsigned long)( arg6 ); \
3677  _argvec[7] = (unsigned long)( arg7 ); \
3678  _argvec[8] = (unsigned long)( arg8 ); \
3679  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3680  "sw $28, 0($29) \n\t" \
3681  "sw $31, 4($29) \n\t" \
3682  "lw $4, 20(%1) \n\t" \
3683  "subu $29, $29, 40\n\t" \
3684  "sw $4, 16($29) \n\t" \
3685  "lw $4, 24(%1) \n\t" \
3686  "sw $4, 20($29) \n\t" \
3687  "lw $4, 28(%1) \n\t" \
3688  "sw $4, 24($29) \n\t" \
3689  "lw $4, 32(%1) \n\t" \
3690  "sw $4, 28($29) \n\t" \
3691  "lw $4, 4(%1) \n\t" \
3692  "lw $5, 8(%1) \n\t" \
3693  "lw $6, 12(%1) \n\t" \
3694  "lw $7, 16(%1) \n\t" \
3695  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3696  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 40 \n\t" \
3697  "lw $28, 0($29) \n\t" \
3698  "lw $31, 4($29) \n\t" \
3699  "addu $29, $29, 8 \n\t" \
3700  "move %0, $2\n" \
3701  : /*out*/ "=r"( _res ) \
3702  : /*in*/ "0"( &_argvec[0] ) \
3703  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3704  lval = (__typeof__( lval ))_res; \
3705  } while ( 0 )
3706 
/* 9 args: a0..a3 + five stack slots; 40-byte outgoing area. */
3707 #define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
3708  do { \
3709  volatile OrigFn _orig = ( orig ); \
3710  volatile unsigned long _argvec[10]; \
3711  volatile unsigned long _res; \
3712  _argvec[0] = (unsigned long)_orig.nraddr; \
3713  _argvec[1] = (unsigned long)( arg1 ); \
3714  _argvec[2] = (unsigned long)( arg2 ); \
3715  _argvec[3] = (unsigned long)( arg3 ); \
3716  _argvec[4] = (unsigned long)( arg4 ); \
3717  _argvec[5] = (unsigned long)( arg5 ); \
3718  _argvec[6] = (unsigned long)( arg6 ); \
3719  _argvec[7] = (unsigned long)( arg7 ); \
3720  _argvec[8] = (unsigned long)( arg8 ); \
3721  _argvec[9] = (unsigned long)( arg9 ); \
3722  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3723  "sw $28, 0($29) \n\t" \
3724  "sw $31, 4($29) \n\t" \
3725  "lw $4, 20(%1) \n\t" \
3726  "subu $29, $29, 40\n\t" \
3727  "sw $4, 16($29) \n\t" \
3728  "lw $4, 24(%1) \n\t" \
3729  "sw $4, 20($29) \n\t" \
3730  "lw $4, 28(%1) \n\t" \
3731  "sw $4, 24($29) \n\t" \
3732  "lw $4, 32(%1) \n\t" \
3733  "sw $4, 28($29) \n\t" \
3734  "lw $4, 36(%1) \n\t" \
3735  "sw $4, 32($29) \n\t" \
3736  "lw $4, 4(%1) \n\t" \
3737  "lw $5, 8(%1) \n\t" \
3738  "lw $6, 12(%1) \n\t" \
3739  "lw $7, 16(%1) \n\t" \
3740  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3741  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 40 \n\t" \
3742  "lw $28, 0($29) \n\t" \
3743  "lw $31, 4($29) \n\t" \
3744  "addu $29, $29, 8 \n\t" \
3745  "move %0, $2\n" \
3746  : /*out*/ "=r"( _res ) \
3747  : /*in*/ "0"( &_argvec[0] ) \
3748  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3749  lval = (__typeof__( lval ))_res; \
3750  } while ( 0 )
3751 
/* 10 args: a0..a3 + six stack slots; 48-byte outgoing area. */
3752 #define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
3753  do { \
3754  volatile OrigFn _orig = ( orig ); \
3755  volatile unsigned long _argvec[11]; \
3756  volatile unsigned long _res; \
3757  _argvec[0] = (unsigned long)_orig.nraddr; \
3758  _argvec[1] = (unsigned long)( arg1 ); \
3759  _argvec[2] = (unsigned long)( arg2 ); \
3760  _argvec[3] = (unsigned long)( arg3 ); \
3761  _argvec[4] = (unsigned long)( arg4 ); \
3762  _argvec[5] = (unsigned long)( arg5 ); \
3763  _argvec[6] = (unsigned long)( arg6 ); \
3764  _argvec[7] = (unsigned long)( arg7 ); \
3765  _argvec[8] = (unsigned long)( arg8 ); \
3766  _argvec[9] = (unsigned long)( arg9 ); \
3767  _argvec[10] = (unsigned long)( arg10 ); \
3768  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3769  "sw $28, 0($29) \n\t" \
3770  "sw $31, 4($29) \n\t" \
3771  "lw $4, 20(%1) \n\t" \
3772  "subu $29, $29, 48\n\t" \
3773  "sw $4, 16($29) \n\t" \
3774  "lw $4, 24(%1) \n\t" \
3775  "sw $4, 20($29) \n\t" \
3776  "lw $4, 28(%1) \n\t" \
3777  "sw $4, 24($29) \n\t" \
3778  "lw $4, 32(%1) \n\t" \
3779  "sw $4, 28($29) \n\t" \
3780  "lw $4, 36(%1) \n\t" \
3781  "sw $4, 32($29) \n\t" \
3782  "lw $4, 40(%1) \n\t" \
3783  "sw $4, 36($29) \n\t" \
3784  "lw $4, 4(%1) \n\t" \
3785  "lw $5, 8(%1) \n\t" \
3786  "lw $6, 12(%1) \n\t" \
3787  "lw $7, 16(%1) \n\t" \
3788  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3789  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 48 \n\t" \
3790  "lw $28, 0($29) \n\t" \
3791  "lw $31, 4($29) \n\t" \
3792  "addu $29, $29, 8 \n\t" \
3793  "move %0, $2\n" \
3794  : /*out*/ "=r"( _res ) \
3795  : /*in*/ "0"( &_argvec[0] ) \
3796  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3797  lval = (__typeof__( lval ))_res; \
3798  } while ( 0 )
3799 
/* 11 args: a0..a3 + seven stack slots; 48-byte outgoing area. */
3800 #define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
3801  do { \
3802  volatile OrigFn _orig = ( orig ); \
3803  volatile unsigned long _argvec[12]; \
3804  volatile unsigned long _res; \
3805  _argvec[0] = (unsigned long)_orig.nraddr; \
3806  _argvec[1] = (unsigned long)( arg1 ); \
3807  _argvec[2] = (unsigned long)( arg2 ); \
3808  _argvec[3] = (unsigned long)( arg3 ); \
3809  _argvec[4] = (unsigned long)( arg4 ); \
3810  _argvec[5] = (unsigned long)( arg5 ); \
3811  _argvec[6] = (unsigned long)( arg6 ); \
3812  _argvec[7] = (unsigned long)( arg7 ); \
3813  _argvec[8] = (unsigned long)( arg8 ); \
3814  _argvec[9] = (unsigned long)( arg9 ); \
3815  _argvec[10] = (unsigned long)( arg10 ); \
3816  _argvec[11] = (unsigned long)( arg11 ); \
3817  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3818  "sw $28, 0($29) \n\t" \
3819  "sw $31, 4($29) \n\t" \
3820  "lw $4, 20(%1) \n\t" \
3821  "subu $29, $29, 48\n\t" \
3822  "sw $4, 16($29) \n\t" \
3823  "lw $4, 24(%1) \n\t" \
3824  "sw $4, 20($29) \n\t" \
3825  "lw $4, 28(%1) \n\t" \
3826  "sw $4, 24($29) \n\t" \
3827  "lw $4, 32(%1) \n\t" \
3828  "sw $4, 28($29) \n\t" \
3829  "lw $4, 36(%1) \n\t" \
3830  "sw $4, 32($29) \n\t" \
3831  "lw $4, 40(%1) \n\t" \
3832  "sw $4, 36($29) \n\t" \
3833  "lw $4, 44(%1) \n\t" \
3834  "sw $4, 40($29) \n\t" \
3835  "lw $4, 4(%1) \n\t" \
3836  "lw $5, 8(%1) \n\t" \
3837  "lw $6, 12(%1) \n\t" \
3838  "lw $7, 16(%1) \n\t" \
3839  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3840  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 48 \n\t" \
3841  "lw $28, 0($29) \n\t" \
3842  "lw $31, 4($29) \n\t" \
3843  "addu $29, $29, 8 \n\t" \
3844  "move %0, $2\n" \
3845  : /*out*/ "=r"( _res ) \
3846  : /*in*/ "0"( &_argvec[0] ) \
3847  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3848  lval = (__typeof__( lval ))_res; \
3849  } while ( 0 )
3850 
/* 12 args: a0..a3 + eight stack slots; 56-byte outgoing area.
   NOTE(review): this macro uses the "r"(&_argvec[0]) input constraint
   while every sibling above uses "0"(...); both leave %1 pointing at
   _argvec, but the inconsistency is worth confirming against upstream
   valgrind.h before "fixing" either way. */
3851 #define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
3852  do { \
3853  volatile OrigFn _orig = ( orig ); \
3854  volatile unsigned long _argvec[13]; \
3855  volatile unsigned long _res; \
3856  _argvec[0] = (unsigned long)_orig.nraddr; \
3857  _argvec[1] = (unsigned long)( arg1 ); \
3858  _argvec[2] = (unsigned long)( arg2 ); \
3859  _argvec[3] = (unsigned long)( arg3 ); \
3860  _argvec[4] = (unsigned long)( arg4 ); \
3861  _argvec[5] = (unsigned long)( arg5 ); \
3862  _argvec[6] = (unsigned long)( arg6 ); \
3863  _argvec[7] = (unsigned long)( arg7 ); \
3864  _argvec[8] = (unsigned long)( arg8 ); \
3865  _argvec[9] = (unsigned long)( arg9 ); \
3866  _argvec[10] = (unsigned long)( arg10 ); \
3867  _argvec[11] = (unsigned long)( arg11 ); \
3868  _argvec[12] = (unsigned long)( arg12 ); \
3869  __asm__ volatile( "subu $29, $29, 8 \n\t" \
3870  "sw $28, 0($29) \n\t" \
3871  "sw $31, 4($29) \n\t" \
3872  "lw $4, 20(%1) \n\t" \
3873  "subu $29, $29, 56\n\t" \
3874  "sw $4, 16($29) \n\t" \
3875  "lw $4, 24(%1) \n\t" \
3876  "sw $4, 20($29) \n\t" \
3877  "lw $4, 28(%1) \n\t" \
3878  "sw $4, 24($29) \n\t" \
3879  "lw $4, 32(%1) \n\t" \
3880  "sw $4, 28($29) \n\t" \
3881  "lw $4, 36(%1) \n\t" \
3882  "sw $4, 32($29) \n\t" \
3883  "lw $4, 40(%1) \n\t" \
3884  "sw $4, 36($29) \n\t" \
3885  "lw $4, 44(%1) \n\t" \
3886  "sw $4, 40($29) \n\t" \
3887  "lw $4, 48(%1) \n\t" \
3888  "sw $4, 44($29) \n\t" \
3889  "lw $4, 4(%1) \n\t" \
3890  "lw $5, 8(%1) \n\t" \
3891  "lw $6, 12(%1) \n\t" \
3892  "lw $7, 16(%1) \n\t" \
3893  "lw $25, 0(%1) \n\t" /* target->t9 */ \
3894  VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 56 \n\t" \
3895  "lw $28, 0($29) \n\t" \
3896  "lw $31, 4($29) \n\t" \
3897  "addu $29, $29, 8 \n\t" \
3898  "move %0, $2\n" \
3899  : /*out*/ "=r"( _res ) \
3900  : /*in*/ "r"( &_argvec[0] ) \
3901  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3902  lval = (__typeof__( lval ))_res; \
3903  } while ( 0 )
3904 
3905 #endif /* PLAT_mips32_linux */
3906 
3907 /* ------------------------- mips64-linux ------------------------- */
3908 
3909 #if defined( PLAT_mips64_linux )
3910 
3911 /* These regs are trashed by the hidden call. */
/* n64 caller-saved set: $2-$3 (v0,v1), $4-$15 (a0-a7,t0-t3),
   $24-$25 (t8,t9) and $31 (ra). */
3912 #define __CALLER_SAVED_REGS \
3913  "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "$31"
3914 
3915 /* These CALL_FN_ macros assume that on mips64-linux, sizeof(unsigned
3916  long) == 8. */
3917 
3918 #define CALL_FN_W_v( lval, orig ) \
3919  do { \
3920  volatile OrigFn _orig = ( orig ); \
3921  volatile unsigned long _argvec[1]; \
3922  volatile unsigned long _res; \
3923  _argvec[0] = (unsigned long)_orig.nraddr; \
3924  __asm__ volatile( "ld $25, 0(%1)\n\t" /* target->t9 */ \
3925  VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
3926  : /*out*/ "=r"( _res ) \
3927  : /*in*/ "0"( &_argvec[0] ) \
3928  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3929  lval = (__typeof__( lval ))_res; \
3930  } while ( 0 )
3931 
3932 #define CALL_FN_W_W( lval, orig, arg1 ) \
3933  do { \
3934  volatile OrigFn _orig = ( orig ); \
3935  volatile unsigned long _argvec[2]; \
3936  volatile unsigned long _res; \
3937  _argvec[0] = (unsigned long)_orig.nraddr; \
3938  _argvec[1] = (unsigned long)( arg1 ); \
3939  __asm__ volatile( "ld $4, 8(%1)\n\t" /* arg1*/ \
3940  "ld $25, 0(%1)\n\t" /* target->t9 */ \
3941  VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
3942  : /*out*/ "=r"( _res ) \
3943  : /*in*/ "r"( &_argvec[0] ) \
3944  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3945  lval = (__typeof__( lval ))_res; \
3946  } while ( 0 )
3947 
3948 #define CALL_FN_W_WW( lval, orig, arg1, arg2 ) \
3949  do { \
3950  volatile OrigFn _orig = ( orig ); \
3951  volatile unsigned long _argvec[3]; \
3952  volatile unsigned long _res; \
3953  _argvec[0] = (unsigned long)_orig.nraddr; \
3954  _argvec[1] = (unsigned long)( arg1 ); \
3955  _argvec[2] = (unsigned long)( arg2 ); \
3956  __asm__ volatile( "ld $4, 8(%1)\n\t" \
3957  "ld $5, 16(%1)\n\t" \
3958  "ld $25, 0(%1)\n\t" /* target->t9 */ \
3959  VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
3960  : /*out*/ "=r"( _res ) \
3961  : /*in*/ "r"( &_argvec[0] ) \
3962  : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
3963  lval = (__typeof__( lval ))_res; \
3964  } while ( 0 )
3965 
/* Call a 3-arg function via the no-redirect gateway; see CALL_FN_W_W. */
#define CALL_FN_W_WWW( lval, orig, arg1, arg2, arg3 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[4]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    __asm__ volatile( "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
3985 
/* Call a 4-arg function via the no-redirect gateway; see CALL_FN_W_W. */
#define CALL_FN_W_WWWW( lval, orig, arg1, arg2, arg3, arg4 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[5]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    __asm__ volatile( "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4007 
/* Call a 5-arg function via the no-redirect gateway; see CALL_FN_W_W. */
#define CALL_FN_W_5W( lval, orig, arg1, arg2, arg3, arg4, arg5 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[6]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    __asm__ volatile( "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4031 
/* Call a 6-arg function via the no-redirect gateway; see CALL_FN_W_W. */
#define CALL_FN_W_6W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[7]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    __asm__ volatile( "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $9, 48(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4057 
/* Call a 7-arg function via the no-redirect gateway; see CALL_FN_W_W. */
#define CALL_FN_W_7W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[8]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    _argvec[7] = (unsigned long)( arg7 ); \
    __asm__ volatile( "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $9, 48(%1)\n\t" \
                      "ld $10, 56(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4085 
/* Call an 8-arg function via the no-redirect gateway; see CALL_FN_W_W.
   Eight args is the register-passing limit ($4..$11) on mips64 n64. */
#define CALL_FN_W_8W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[9]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    _argvec[7] = (unsigned long)( arg7 ); \
    _argvec[8] = (unsigned long)( arg8 ); \
    __asm__ volatile( "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $9, 48(%1)\n\t" \
                      "ld $10, 56(%1)\n\t" \
                      "ld $11, 64(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4115 
/* Call a 9-arg function via the no-redirect gateway; see CALL_FN_W_W.
   Args beyond the eighth are spilled to the stack ($29) before the call. */
#define CALL_FN_W_9W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[10]; \
    volatile unsigned long _res; \
    _argvec[0] = (unsigned long)_orig.nraddr; \
    _argvec[1] = (unsigned long)( arg1 ); \
    _argvec[2] = (unsigned long)( arg2 ); \
    _argvec[3] = (unsigned long)( arg3 ); \
    _argvec[4] = (unsigned long)( arg4 ); \
    _argvec[5] = (unsigned long)( arg5 ); \
    _argvec[6] = (unsigned long)( arg6 ); \
    _argvec[7] = (unsigned long)( arg7 ); \
    _argvec[8] = (unsigned long)( arg8 ); \
    _argvec[9] = (unsigned long)( arg9 ); \
    __asm__ volatile( "dsubu $29, $29, 8\n\t" \
                      "ld $4, 72(%1)\n\t" /* arg9 -> stack */ \
                      "sd $4, 0($29)\n\t" \
                      "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $9, 48(%1)\n\t" \
                      "ld $10, 56(%1)\n\t" \
                      "ld $11, 64(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "daddu $29, $29, 8\n\t" \
                      "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4150 
/* Call a 10-arg function via the no-redirect gateway; see CALL_FN_W_9W. */
#define CALL_FN_W_10W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[11]; \
    volatile unsigned long _res; \
    _argvec[0]  = (unsigned long)_orig.nraddr; \
    _argvec[1]  = (unsigned long)( arg1 ); \
    _argvec[2]  = (unsigned long)( arg2 ); \
    _argvec[3]  = (unsigned long)( arg3 ); \
    _argvec[4]  = (unsigned long)( arg4 ); \
    _argvec[5]  = (unsigned long)( arg5 ); \
    _argvec[6]  = (unsigned long)( arg6 ); \
    _argvec[7]  = (unsigned long)( arg7 ); \
    _argvec[8]  = (unsigned long)( arg8 ); \
    _argvec[9]  = (unsigned long)( arg9 ); \
    _argvec[10] = (unsigned long)( arg10 ); \
    __asm__ volatile( "dsubu $29, $29, 16\n\t" \
                      "ld $4, 72(%1)\n\t" /* arg9 -> stack */ \
                      "sd $4, 0($29)\n\t" \
                      "ld $4, 80(%1)\n\t" /* arg10 -> stack */ \
                      "sd $4, 8($29)\n\t" \
                      "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $9, 48(%1)\n\t" \
                      "ld $10, 56(%1)\n\t" \
                      "ld $11, 64(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "daddu $29, $29, 16\n\t" \
                      "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4188 
/* Call an 11-arg function via the no-redirect gateway; see CALL_FN_W_9W. */
#define CALL_FN_W_11W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[12]; \
    volatile unsigned long _res; \
    _argvec[0]  = (unsigned long)_orig.nraddr; \
    _argvec[1]  = (unsigned long)( arg1 ); \
    _argvec[2]  = (unsigned long)( arg2 ); \
    _argvec[3]  = (unsigned long)( arg3 ); \
    _argvec[4]  = (unsigned long)( arg4 ); \
    _argvec[5]  = (unsigned long)( arg5 ); \
    _argvec[6]  = (unsigned long)( arg6 ); \
    _argvec[7]  = (unsigned long)( arg7 ); \
    _argvec[8]  = (unsigned long)( arg8 ); \
    _argvec[9]  = (unsigned long)( arg9 ); \
    _argvec[10] = (unsigned long)( arg10 ); \
    _argvec[11] = (unsigned long)( arg11 ); \
    __asm__ volatile( "dsubu $29, $29, 24\n\t" \
                      "ld $4, 72(%1)\n\t" /* arg9 -> stack */ \
                      "sd $4, 0($29)\n\t" \
                      "ld $4, 80(%1)\n\t" /* arg10 -> stack */ \
                      "sd $4, 8($29)\n\t" \
                      "ld $4, 88(%1)\n\t" /* arg11 -> stack */ \
                      "sd $4, 16($29)\n\t" \
                      "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $9, 48(%1)\n\t" \
                      "ld $10, 56(%1)\n\t" \
                      "ld $11, 64(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "daddu $29, $29, 24\n\t" \
                      "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4229 
/* Call a 12-arg function via the no-redirect gateway; see CALL_FN_W_9W. */
#define CALL_FN_W_12W( lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12 ) \
  do { \
    volatile OrigFn        _orig = ( orig ); \
    volatile unsigned long _argvec[13]; \
    volatile unsigned long _res; \
    _argvec[0]  = (unsigned long)_orig.nraddr; \
    _argvec[1]  = (unsigned long)( arg1 ); \
    _argvec[2]  = (unsigned long)( arg2 ); \
    _argvec[3]  = (unsigned long)( arg3 ); \
    _argvec[4]  = (unsigned long)( arg4 ); \
    _argvec[5]  = (unsigned long)( arg5 ); \
    _argvec[6]  = (unsigned long)( arg6 ); \
    _argvec[7]  = (unsigned long)( arg7 ); \
    _argvec[8]  = (unsigned long)( arg8 ); \
    _argvec[9]  = (unsigned long)( arg9 ); \
    _argvec[10] = (unsigned long)( arg10 ); \
    _argvec[11] = (unsigned long)( arg11 ); \
    _argvec[12] = (unsigned long)( arg12 ); \
    __asm__ volatile( "dsubu $29, $29, 32\n\t" \
                      "ld $4, 72(%1)\n\t" /* arg9 -> stack */ \
                      "sd $4, 0($29)\n\t" \
                      "ld $4, 80(%1)\n\t" /* arg10 -> stack */ \
                      "sd $4, 8($29)\n\t" \
                      "ld $4, 88(%1)\n\t" /* arg11 -> stack */ \
                      "sd $4, 16($29)\n\t" \
                      "ld $4, 96(%1)\n\t" /* arg12 -> stack */ \
                      "sd $4, 24($29)\n\t" \
                      "ld $4, 8(%1)\n\t" \
                      "ld $5, 16(%1)\n\t" \
                      "ld $6, 24(%1)\n\t" \
                      "ld $7, 32(%1)\n\t" \
                      "ld $8, 40(%1)\n\t" \
                      "ld $9, 48(%1)\n\t" \
                      "ld $10, 56(%1)\n\t" \
                      "ld $11, 64(%1)\n\t" \
                      "ld $25, 0(%1)\n\t" /* target->t9 */ \
                      VALGRIND_CALL_NOREDIR_T9 "daddu $29, $29, 32\n\t" \
                      "move %0, $2\n" \
                      : /*out*/ "=r"( _res ) \
                      : /*in*/ "r"( &_argvec[0] ) \
                      : /*trash*/ "memory", __CALLER_SAVED_REGS ); \
    lval = (__typeof__( lval ))_res; \
  } while ( 0 )
4273 
4274 #endif /* PLAT_mips64_linux */
4275 
4276 /* ------------------------------------------------------------------ */
4277 /* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
4278 /* */
4279 /* ------------------------------------------------------------------ */
4280 
4281 /* Some request codes. There are many more of these, but most are not
4282  exposed to end-user view. These are the public ones, all of the
4283  form 0x1000 + small_number.
4284 
4285  Core ones are in the range 0x00000000--0x0000ffff. The non-public
4286  ones start at 0x2000.
4287 */
4288 
/* These macros are used by tools -- they must be public, but don't
   embed them into other programs.
   TOOL_BASE packs two ASCII chars into bits 31..16 of a request code;
   IS_TOOL_USERREQ tests a value against that 16-bit-high-half tag. */
#define VG_USERREQ_TOOL_BASE( a, b ) ( (unsigned int)( ( (a)&0xff ) << 24 | ( (b)&0xff ) << 16 ) )
#define VG_IS_TOOL_USERREQ( a, b, v ) ( VG_USERREQ_TOOL_BASE( a, b ) == ( (v)&0xffff0000 ) )
4293 
4294 /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
4295  This enum comprises an ABI exported by Valgrind to programs
4296  which use client requests. DO NOT CHANGE THE ORDER OF THESE
4297  ENTRIES, NOR DELETE ANY -- add new ones at the end. */
4298 typedef enum {
4301 
4302  /* These allow any function to be called from the simulated
4303  CPU but run on the real CPU. Nb: the first arg passed to
4304  the function is always the ThreadId of the running
4305  thread! So CLIENT_CALL0 actually requires a 1 arg
4306  function, etc. */
4311 
4312  /* Can be useful in regression testing suites -- eg. can
4313  send Valgrind's output to /dev/null and still count
4314  errors. */
4316 
4317  /* Allows the client program and/or gdbserver to execute a monitor
4318  command. */
4320 
4321  /* These are useful and can be interpreted by any tool that
4322  tracks malloc() et al, by using vg_replace_malloc.c. */
4326  /* Memory pool support. */
4335 
4336  /* Allow printfs to valgrind log. */
4337  /* The first two pass the va_list argument by value, which
4338  assumes it is the same size as or smaller than a UWord,
4339  which generally isn't the case. Hence are deprecated.
4340  The second two pass the vargs by reference and so are
4341  immune to this problem. */
4342  /* both :: char* fmt, va_list vargs (DEPRECATED) */
4345  /* both :: char* fmt, va_list* vargs */
4348 
4349  /* Stack support. */
4353 
4354  /* Wine support */
4356 
4357  /* Querying of debug info. */
4359 
4360  /* Disable/enable error reporting level. Takes a single
4361  Word arg which is the delta to this thread's error
4362  disablement indicator. Hence 1 disables or further
4363  disables errors, and -1 moves back towards enablement.
4364  Other values are not allowed. */
4366 
4367  /* Initialise IR injection */
4370 
/* Non-GCC compilers don't know GCC's __extension__ keyword; make it a
   no-op so the request macros below still parse. */
#if !defined( __GNUC__ )
#define __extension__ /* */
#endif
4374 
/* Returns the number of Valgrinds this code is running under. That
   is, 0 if running natively, 1 if running under Valgrind, 2 if
   running under Valgrind which is running under another Valgrind,
   etc. */
#define RUNNING_ON_VALGRIND \
  (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( 0 /* if not */, VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0 )
4381 
/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
   _qzz_len - 1]. Useful if you are debugging a JITter or some such,
   since it provides a way to make sure valgrind will retranslate the
   invalidated area. Returns no value. */
#define VALGRIND_DISCARD_TRANSLATIONS( _qzz_addr, _qzz_len ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__DISCARD_TRANSLATIONS, _qzz_addr, _qzz_len, 0, 0, 0 )
4388 
4389 /* These requests are for getting Valgrind itself to print something.
4390  Possibly with a backtrace. This is a really ugly hack. The return value
4391  is the number of characters printed, excluding the "**<pid>** " part at the
4392  start and the backtrace (if present). */
4393 
4394 #if defined( __GNUC__ ) || defined( __INTEL_COMPILER ) && !defined( _MSC_VER )
4395 /* Modern GCC will optimize the static routine out if unused,
4396  and unused attribute will shut down warnings about it. */
4397 static int VALGRIND_PRINTF( const char* format, ... ) __attribute__( ( format( __printf__, 1, 2 ), __unused__ ) );
4398 #endif
4399 static int
4400 #if defined( _MSC_VER )
4401  __inline
4402 #endif
4403  VALGRIND_PRINTF( const char* format, ... )
4404 {
4405 #if defined( NVALGRIND )
4406  return 0;
4407 #else /* NVALGRIND */
4408 #if defined( _MSC_VER ) || defined( __MINGW64__ )
4409  uintptr_t _qzz_res;
4410 #else
4411  unsigned long _qzz_res;
4412 #endif
4413  va_list vargs;
4414  va_start( vargs, format );
4415 #if defined( _MSC_VER ) || defined( __MINGW64__ )
4416  _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( 0, VG_USERREQ__PRINTF_VALIST_BY_REF, (uintptr_t)format, (uintptr_t)&vargs,
4417  0, 0, 0 );
4418 #else
4419  _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( 0, VG_USERREQ__PRINTF_VALIST_BY_REF, (unsigned long)format,
4420  (unsigned long)&vargs, 0, 0, 0 );
4421 #endif
4422  va_end( vargs );
4423  return (int)_qzz_res;
4424 #endif /* NVALGRIND */
4425 }
4426 
4427 #if defined( __GNUC__ ) || defined( __INTEL_COMPILER ) && !defined( _MSC_VER )
4428 static int VALGRIND_PRINTF_BACKTRACE( const char* format, ... )
4429  __attribute__( ( format( __printf__, 1, 2 ), __unused__ ) );
4430 #endif
4431 static int
4432 #if defined( _MSC_VER )
4433  __inline
4434 #endif
4435  VALGRIND_PRINTF_BACKTRACE( const char* format, ... )
4436 {
4437 #if defined( NVALGRIND )
4438  return 0;
4439 #else /* NVALGRIND */
4440 #if defined( _MSC_VER ) || defined( __MINGW64__ )
4441  uintptr_t _qzz_res;
4442 #else
4443  unsigned long _qzz_res;
4444 #endif
4445  va_list vargs;
4446  va_start( vargs, format );
4447 #if defined( _MSC_VER ) || defined( __MINGW64__ )
4449  (uintptr_t)&vargs, 0, 0, 0 );
4450 #else
4452  (unsigned long)&vargs, 0, 0, 0 );
4453 #endif
4454  va_end( vargs );
4455  return (int)_qzz_res;
4456 #endif /* NVALGRIND */
4457 }
4458 
4459 /* These requests allow control to move from the simulated CPU to the
4460  real CPU, calling an arbitrary function.
4461 
4462  Note that the current ThreadId is inserted as the first argument.
4463  So this call:
4464 
4465  VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
4466 
4467  requires f to have this signature:
4468 
4469  Word f(Word tid, Word arg1, Word arg2)
4470 
4471  where "Word" is a word-sized type.
4472 
4473  Note that these client requests are not entirely reliable. For example,
4474  if you call a function with them that subsequently calls printf(),
4475  there's a high chance Valgrind will crash. Generally, your prospects of
4476  these working are made higher if the called function does not refer to
4477  any global variables, and does not refer to any libc or other functions
4478  (printf et al). Any kind of entanglement with libc or dynamic linking is
4479  likely to have a bad outcome, for tricky reasons which we've grappled
4480  with a lot in the past.
4481 */
/* Run _qyy_fn on the real CPU (not the simulated one).  The current
   ThreadId is always inserted as the first argument, so CALLn requires
   a function taking n+1 Word arguments.  See the caveats above. */
#define VALGRIND_NON_SIMD_CALL0( _qyy_fn ) \
  VALGRIND_DO_CLIENT_REQUEST_EXPR( 0 /* default return */, VG_USERREQ__CLIENT_CALL0, _qyy_fn, 0, 0, 0, 0 )

#define VALGRIND_NON_SIMD_CALL1( _qyy_fn, _qyy_arg1 ) \
  VALGRIND_DO_CLIENT_REQUEST_EXPR( 0 /* default return */, VG_USERREQ__CLIENT_CALL1, _qyy_fn, _qyy_arg1, 0, 0, 0 )

#define VALGRIND_NON_SIMD_CALL2( _qyy_fn, _qyy_arg1, _qyy_arg2 ) \
  VALGRIND_DO_CLIENT_REQUEST_EXPR( 0 /* default return */, VG_USERREQ__CLIENT_CALL2, _qyy_fn, _qyy_arg1, _qyy_arg2, 0, \
                                   0 )

#define VALGRIND_NON_SIMD_CALL3( _qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3 ) \
  VALGRIND_DO_CLIENT_REQUEST_EXPR( 0 /* default return */, VG_USERREQ__CLIENT_CALL3, _qyy_fn, _qyy_arg1, _qyy_arg2, \
                                   _qyy_arg3, 0 )
4495 
/* Counts the number of errors that have been recorded by a tool. Nb:
   the tool must record the errors with VG_(maybe_record_error)() or
   VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS \
  (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( 0 /* default return */, VG_USERREQ__COUNT_ERRORS, 0, 0, 0, 0, 0 )
4501 
4502 /* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
4503  when heap blocks are allocated in order to give accurate results. This
4504  happens automatically for the standard allocator functions such as
4505  malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
4506  delete[], etc.
4507 
4508  But if your program uses a custom allocator, this doesn't automatically
4509  happen, and Valgrind will not do as well. For example, if you allocate
4510  superblocks with mmap() and then allocate chunks of the superblocks, all
4511  Valgrind's observations will be at the mmap() level and it won't know that
4512  the chunks should be considered separate entities. In Memcheck's case,
4513  that means you probably won't get heap block overrun detection (because
4514  there won't be redzones marked as unaddressable) and you definitely won't
4515  get any leak detection.
4516 
4517  The following client requests allow a custom allocator to be annotated so
4518  that it can be handled accurately by Valgrind.
4519 
4520  VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
4521  by a malloc()-like function. For Memcheck (an illustrative case), this
4522  does two things:
4523 
4524  - It records that the block has been allocated. This means any addresses
4525  within the block mentioned in error messages will be
4526  identified as belonging to the block. It also means that if the block
4527  isn't freed it will be detected by the leak checker.
4528 
4529  - It marks the block as being addressable and undefined (if 'is_zeroed' is
4530  not set), or addressable and defined (if 'is_zeroed' is set). This
4531  controls how accesses to the block by the program are handled.
4532 
4533  'addr' is the start of the usable block (ie. after any
4534  redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
4535  can apply redzones -- these are blocks of padding at the start and end of
4536  each block. Adding redzones is recommended as it makes it much more likely
4537  Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
4538  zeroed (or filled with another predictable value), as is the case for
4539  calloc().
4540 
4541  VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
4542  heap block -- that will be used by the client program -- is allocated.
4543  It's best to put it at the outermost level of the allocator if possible;
4544  for example, if you have a function my_alloc() which calls
4545  internal_alloc(), and the client request is put inside internal_alloc(),
4546  stack traces relating to the heap block will contain entries for both
4547  my_alloc() and internal_alloc(), which is probably not what you want.
4548 
4549  For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
4550  custom blocks from within a heap block, B, that has been allocated with
4551  malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
4552  -- the custom blocks will take precedence.
4553 
4554  VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
4555  Memcheck, it does two things:
4556 
4557  - It records that the block has been deallocated. This assumes that the
4558  block was annotated as having been allocated via
4559  VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
4560 
4561  - It marks the block as being unaddressable.
4562 
4563  VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
4564  heap block is deallocated.
4565 
4566  VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
4567  Memcheck, it does four things:
4568 
4569  - It records that the size of a block has been changed. This assumes that
4570  the block was annotated as having been allocated via
4571  VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
4572 
4573  - If the block shrunk, it marks the freed memory as being unaddressable.
4574 
4575  - If the block grew, it marks the new area as undefined and defines a red
4576  zone past the end of the new block.
4577 
4578  - The V-bits of the overlap between the old and the new block are preserved.
4579 
4580  VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
4581  and before deallocation of the old block.
4582 
4583  In many cases, these three client requests will not be enough to get your
4584  allocator working well with Memcheck. More specifically, if your allocator
4585  writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
4586  will be necessary to mark the memory as addressable just before the zeroing
4587  occurs, otherwise you'll get a lot of invalid write errors. For example,
4588  you'll need to do this if your allocator recycles freed blocks, but it
4589  zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
4590  Alternatively, if your allocator reuses freed blocks for allocator-internal
4591  data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
4592 
4593  Really, what's happening is a blurring of the lines between the client
4594  program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
4595  memory should be considered unaddressable to the client program, but the
4596  allocator knows more than the rest of the client program and so may be able
4597  to safely access it. Extra client requests are necessary for Valgrind to
4598  understand the distinction between the allocator and the rest of the
4599  program.
4600 
4601  Ignored if addr == 0.
4602 */
/* Mark [addr, addr+sizeB) as a heap block allocated by a custom
   allocator; see the long explanatory comment above.  Ignored if
   addr == 0. */
#define VALGRIND_MALLOCLIKE_BLOCK( addr, sizeB, rzB, is_zeroed ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__MALLOCLIKE_BLOCK, addr, sizeB, rzB, is_zeroed, 0 )

/* Inform tools of an in-place resize of a custom-allocated block.
   See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
   Ignored if addr == 0. */
#define VALGRIND_RESIZEINPLACE_BLOCK( addr, oldSizeB, newSizeB, rzB ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__RESIZEINPLACE_BLOCK, addr, oldSizeB, newSizeB, rzB, 0 )

/* Mark a custom-allocated block as freed.
   See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
   Ignored if addr == 0. */
#define VALGRIND_FREELIKE_BLOCK( addr, rzB ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__FREELIKE_BLOCK, addr, rzB, 0, 0, 0 )
4617 
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL( pool, rzB, is_zeroed ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__CREATE_MEMPOOL, pool, rzB, is_zeroed, 0, 0 )

/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL( pool ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__DESTROY_MEMPOOL, pool, 0, 0, 0, 0 )

/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC( pool, addr, size ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__MEMPOOL_ALLOC, pool, addr, size, 0, 0 )

/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE( pool, addr ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__MEMPOOL_FREE, pool, addr, 0, 0, 0 )

/* Disassociate any pieces outside a particular range. */
#define VALGRIND_MEMPOOL_TRIM( pool, addr, size ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__MEMPOOL_TRIM, pool, addr, size, 0, 0 )

/* Move all pieces from pool poolA to pool poolB (the original comment,
   "Resize and/or move a piece...", described MEMPOOL_CHANGE instead). */
#define VALGRIND_MOVE_MEMPOOL( poolA, poolB ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__MOVE_MEMPOOL, poolA, poolB, 0, 0, 0 )

/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MEMPOOL_CHANGE( pool, addrA, addrB, size ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__MEMPOOL_CHANGE, pool, addrA, addrB, size, 0 )

/* Return 1 if a mempool exists, else 0. */
#define VALGRIND_MEMPOOL_EXISTS( pool ) \
  (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( 0, VG_USERREQ__MEMPOOL_EXISTS, pool, 0, 0, 0, 0 )
4649 
/* Mark a piece of memory as being a stack. Returns a stack id. */
#define VALGRIND_STACK_REGISTER( start, end ) \
  (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( 0, VG_USERREQ__STACK_REGISTER, start, end, 0, 0, 0 )

/* Unmark the piece of memory associated with a stack id as being a
   stack. */
#define VALGRIND_STACK_DEREGISTER( id ) VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__STACK_DEREGISTER, id, 0, 0, 0, 0 )

/* Change the start and end address of the stack id. */
#define VALGRIND_STACK_CHANGE( id, start, end ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__STACK_CHANGE, id, start, end, 0, 0 )
4661 
/* Load PDB debug info for Wine PE image_map. */
#define VALGRIND_LOAD_PDB_DEBUGINFO( fd, ptr, total_size, delta ) \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__LOAD_PDB_DEBUGINFO, fd, ptr, total_size, delta, 0 )

/* Map a code address to a source file name and line number. buf64
   must point to a 64-byte buffer in the caller's address space. The
   result will be dumped in there and is guaranteed to be zero
   terminated. If no info is found, the first byte is set to zero. */
#define VALGRIND_MAP_IP_TO_SRCLOC( addr, buf64 ) \
  (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( 0, VG_USERREQ__MAP_IP_TO_SRCLOC, addr, buf64, 0, 0, 0 )
4672 
/* Disable error reporting for this thread. Behaves in a stack like
   way, so you can safely call this multiple times provided that
   VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
   to re-enable reporting. The first call of this macro disables
   reporting. Subsequent calls have no effect except to increase the
   number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
   reporting. Child threads do not inherit this setting from their
   parents -- they are always created with reporting enabled. */
#define VALGRIND_DISABLE_ERROR_REPORTING \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__CHANGE_ERR_DISABLEMENT, 1, 0, 0, 0, 0 )

/* Re-enable error reporting, as per comments on
   VALGRIND_DISABLE_ERROR_REPORTING. */
#define VALGRIND_ENABLE_ERROR_REPORTING \
  VALGRIND_DO_CLIENT_REQUEST_STMT( VG_USERREQ__CHANGE_ERR_DISABLEMENT, -1, 0, 0, 0, 0 )
4688 
/* Execute a monitor command from the client program.
   If a connection is opened with GDB, the output will be sent
   according to the output mode set for vgdb.
   If no connection is opened, output will go to the log output.
   Returns 1 if command not recognised, 0 otherwise. */
#define VALGRIND_MONITOR_COMMAND( command ) \
  VALGRIND_DO_CLIENT_REQUEST_EXPR( 0, VG_USERREQ__GDB_MONITOR_COMMAND, command, 0, 0, 0, 0 )
4696 
4697 #undef PLAT_x86_darwin
4698 #undef PLAT_amd64_darwin
4699 #undef PLAT_x86_win32
4700 #undef PLAT_amd64_win64
4701 #undef PLAT_x86_linux
4702 #undef PLAT_amd64_linux
4703 #undef PLAT_ppc32_linux
4704 #undef PLAT_ppc64_linux
4705 #undef PLAT_arm_linux
4706 #undef PLAT_s390x_linux
4707 #undef PLAT_mips32_linux
4708 #undef PLAT_mips64_linux
4709 
4710 #endif /* __VALGRIND_H */
#define __attribute__(x)
Definition: System.cpp:79
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:120
#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)
Vg_ClientRequest