;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2012 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Henrik Gramner <hengar-6@student.ltu.se>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible.  Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well.  Send patches or ideas
; to x264-devel@videolan.org .

%define program_name ff

%define WIN64  0
%define UNIX64 0
%if ARCH_X86_64
    %ifidn __OUTPUT_FORMAT__,win32
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,win64
        %define WIN64  1
    %else
        %define UNIX64 1
    %endif
%endif

%ifdef PREFIX
    %define mangle(x) _ %+ x
%else
    %define mangle(x) x
%endif

; Name of the .rodata section.
; Kludge: Something on OS X fails to align .rodata even given an align attribute,
; so use a different read-only section.
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,macho64
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,macho
        SECTION .text align=%1
        fakegot:
    %elifidn __OUTPUT_FORMAT__,aout
        section .text
    %else
        SECTION .rodata align=%1
    %endif
%endmacro

; aout does not support align=
%macro SECTION_TEXT 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text
    %else
        SECTION .text align=%1
    %endif
%endmacro

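; Usage sketch (hypothetical constant): start an aligned read-only section
; and emit a table into it, e.g.
;     SECTION_RODATA 32
;     pw_1: times 16 dw 1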
%if WIN64
    %define PIC
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
    %undef PIC
%endif
%ifdef PIC
    default rel
%endif

; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
CPU amdnop

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
; which are slow when a normal ret follows a branch.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size

%macro DECLARE_REG 2-3
    %define r%1q %2
    %define r%1d %2d
    %define r%1w %2w
    %define r%1b %2b
    %define r%1h %2h
    %if %0 == 2
        %define r%1m  %2d
        %define r%1mp %2
    %elif ARCH_X86_64 ; memory
        %define r%1m [rsp + stack_offset + %3]
        %define r%1mp qword r %+ %1m
    %else
        %define r%1m [esp + stack_offset + %3]
        %define r%1mp dword r %+ %1m
    %endif
    %define r%1  %2
%endmacro

%macro DECLARE_REG_SIZE 3
    %define r%1q r%1
    %define e%1q r%1
    %define r%1d e%1
    %define e%1d e%1
    %define r%1w %1
    %define e%1w %1
    %define r%1h %3
    %define e%1h %3
    %define r%1b %2
    %define e%1b %2
%if ARCH_X86_64 == 0
    %define r%1  e%1
%endif
%endmacro

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
    %assign %%i 0
    %rep %0
        CAT_XDEFINE t, %%i, r%1
        %assign %%i %%i+1
        %rotate 1
    %endrep
%endmacro

%macro DECLARE_REG_TMP_SIZE 0-*
    %rep %0
        %define t%1q t%1 %+ q
        %define t%1d t%1 %+ d
        %define t%1w t%1 %+ w
        %define t%1h t%1 %+ h
        %define t%1b t%1 %+ b
        %rotate 1
    %endrep
%endmacro

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14

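; Usage sketch (hypothetical mapping): alias the temporaries to whichever
; registers happen to be free on the current arch, then use the sized names:
;     DECLARE_REG_TMP 4, 1    ; t0 -> r4, t1 -> r1
;     mov t0d, [t1q]          ; t0d/t1q come from DECLARE_REG_TMP_SIZE above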
%if ARCH_X86_64
    %define gprsize 8
%else
    %define gprsize 4
%endif

%macro PUSH 1
    push %1
    %assign stack_offset stack_offset+gprsize
%endmacro

%macro POP 1
    pop %1
    %assign stack_offset stack_offset-gprsize
%endmacro

%macro PUSH_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            PUSH r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro POP_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            pop r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro LOAD_IF_USED 1-*
    %rep %0
        %if %1 < num_args
            mov r%1, r %+ %1 %+ mp
        %endif
        %rotate 1
    %endrep
%endmacro

%macro SUB 2
    sub %1, %2
    %ifidn %1, rsp
        %assign stack_offset stack_offset+(%2)
    %endif
%endmacro

%macro ADD 2
    add %1, %2
    %ifidn %1, rsp
        %assign stack_offset stack_offset-(%2)
    %endif
%endmacro

%macro movifnidn 2
    %ifnidn %1, %2
        mov %1, %2
    %endif
%endmacro

%macro movsxdifnidn 2
    %ifnidn %1, %2
        movsxd %1, %2
    %endif
%endmacro

%macro ASSERT 1
    %if (%1) == 0
        %error assert failed
    %endif
%endmacro

%macro DEFINE_ARGS 0-*
    %ifdef n_arg_names
        %assign %%i 0
        %rep n_arg_names
            CAT_UNDEF arg_name %+ %%i, q
            CAT_UNDEF arg_name %+ %%i, d
            CAT_UNDEF arg_name %+ %%i, w
            CAT_UNDEF arg_name %+ %%i, h
            CAT_UNDEF arg_name %+ %%i, b
            CAT_UNDEF arg_name %+ %%i, m
            CAT_UNDEF arg_name %+ %%i, mp
            CAT_UNDEF arg_name, %%i
            %assign %%i %%i+1
        %endrep
    %endif

    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
    %assign %%i 0
    %rep %0
        %xdefine %1q r %+ %%i %+ q
        %xdefine %1d r %+ %%i %+ d
        %xdefine %1w r %+ %%i %+ w
        %xdefine %1h r %+ %%i %+ h
        %xdefine %1b r %+ %%i %+ b
        %xdefine %1m r %+ %%i %+ m
        %xdefine %1mp r %+ %%i %+ mp
        CAT_XDEFINE arg_name, %%i, %1
        %assign %%i %%i+1
        %rotate 1
    %endrep
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
%endmacro

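; Usage sketch (hypothetical names): inside a function declared with
; "cglobal foo, 2,3" the argument registers can be (re)named directly:
;     DEFINE_ARGS dst, src, len   ; dstq/dstd/... = r0, srcq/... = r1, lenq/... = r2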
%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0,  rcx
DECLARE_REG 1,  rdx
DECLARE_REG 2,  R8
DECLARE_REG 3,  R9
DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120

%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    %if mmsize == 8
        %assign xmm_regs_used 0
    %else
        WIN64_SPILL_XMM %3
    %endif
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS %4
%endmacro

%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16
    %if xmm_regs_used > 6
        SUB rsp, (xmm_regs_used-6)*16+16
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            %assign %%i %%i-1
            movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
        %endrep
    %endif
%endmacro

%macro WIN64_RESTORE_XMM_INTERNAL 1
    %if xmm_regs_used > 6
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            %assign %%i %%i-1
            movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
        %endrep
        add %1, (xmm_regs_used-6)*16+16
    %endif
%endmacro

%macro WIN64_RESTORE_XMM 1
    WIN64_RESTORE_XMM_INTERNAL %1
    %if xmm_regs_used > 6
        %assign stack_offset stack_offset-((xmm_regs_used-6)*16+16)
    %endif
    %assign xmm_regs_used 0
%endmacro

%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32

%macro RET 0
    WIN64_RESTORE_XMM_INTERNAL rsp
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
%if mmsize == 32
    vzeroupper
%endif
    ret
%endmacro

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0,  rdi
DECLARE_REG 1,  rsi
DECLARE_REG 2,  rdx
DECLARE_REG 3,  rcx
DECLARE_REG 4,  R8
DECLARE_REG 5,  R9
DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72

%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS %4
%endmacro

%define has_epilogue regs_used > 9 || mmsize == 32

%macro RET 0
    POP_IF_USED 14, 13, 12, 11, 10, 9
%if mmsize == 32
    vzeroupper
%endif
    ret
%endmacro

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28
%define rsp esp

%macro DECLARE_ARG 1-*
    %rep %0
        %define r%1m [esp + stack_offset + 4*%1 + 4]
        %define r%1mp dword r%1m
        %rotate 1
    %endrep
%endmacro

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
    %assign num_args %1
    %assign regs_used %2
    %if num_args > 7
        %assign num_args 7
    %endif
    %if regs_used > 7
        %assign regs_used 7
    %endif
    ASSERT regs_used >= num_args
    PUSH_IF_USED 3, 4, 5, 6
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
    DEFINE_ARGS %4
%endmacro

%define has_epilogue regs_used > 3 || mmsize == 32

%macro RET 0
    POP_IF_USED 6, 5, 4, 3
%if mmsize == 32
    vzeroupper
%endif
    ret
%endmacro

%endif ;======================================================================

%if WIN64 == 0
%macro WIN64_SPILL_XMM 1
%endmacro
%macro WIN64_RESTORE_XMM 1
%endmacro
%endif

%macro REP_RET 0
    %if has_epilogue
        RET
    %else
        rep ret
    %endif
%endmacro

%macro TAIL_CALL 2 ; callee, is_nonadjacent
    %if has_epilogue
        call %1
        RET
    %elif %2
        jmp %1
    %endif
%endmacro

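; e.g. "TAIL_CALL foo, 1" jumps to foo (or does call+RET when an epilogue is
; needed); with 0 it emits nothing and falls through, which assumes foo is
; the function immediately following in the file.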
;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
%macro cglobal 1-2+ ; name, [PROLOGUE args]
%if %0 == 1
    cglobal_internal %1 %+ SUFFIX
%else
    cglobal_internal %1 %+ SUFFIX, %2
%endif
%endmacro
%macro cglobal_internal 1-2+
    %ifndef cglobaled_%1
        %xdefine %1 mangle(program_name %+ _ %+ %1)
        %xdefine %1.skip_prologue %1 %+ .skip_prologue
        CAT_XDEFINE cglobaled_, %1, 1
    %endif
    %xdefine current_function %1
    %ifidn __OUTPUT_FORMAT__,elf
        global %1:function hidden
    %else
        global %1
    %endif
    align function_align
    %1:
    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
    %assign stack_offset 0
    %if %0 > 1
        PROLOGUE %2
    %endif
%endmacro

%macro cextern 1
    %xdefine %1 mangle(program_name %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

; like cextern, but without the prefix
%macro cextern_naked 1
    %xdefine %1 mangle(%1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

%macro const 2+
    %xdefine %1 mangle(program_name %+ _ %+ %1)
    global %1
    %1: %2
%endmacro

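; Usage sketch (hypothetical constant): with program_name ff this emits a
; global symbol whose mangled name is ff_pw_64 (plus any PREFIX underscore):
;     const pw_64, times 8 dw 64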
; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif

; cpuflags

%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnow2   (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx
%assign cpuflags_avx2     (1<<14)| cpuflags_avx
%assign cpuflags_fma3     (1<<15)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<22)
%assign cpuflags_bmi1     (1<<23)
%assign cpuflags_bmi2     (1<<24)|cpuflags_bmi1
%assign cpuflags_tbm      (1<<25)|cpuflags_bmi1

%define    cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))

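; Usage sketch: branch at assembly time on the target's capabilities, e.g.
;     %if cpuflag(ssse3)
;         pshufb m0, m2    ; single-instruction shuffle on SSSE3 and later
;     %else
;         ; slower pre-SSSE3 fallback would go here
;     %endif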
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-2
    CPU amdnop
    %if %0 >= 1
        %xdefine cpuname %1
        %assign cpuflags cpuflags_%1
        %if %0 >= 2
            %xdefine cpuname %1_%2
            %assign cpuflags cpuflags | cpuflags_%2
        %endif
        %xdefine SUFFIX _ %+ cpuname
        %if cpuflag(avx)
            %assign avx_enabled 1
        %endif
        %if mmsize == 16 && notcpuflag(sse2)
            %define mova movaps
            %define movu movups
            %define movnta movntps
        %endif
        %if cpuflag(aligned)
            %define movu mova
        %elifidn %1, sse3
            %define movu lddqu
        %endif
        %if notcpuflag(mmx2)
            CPU basicnop
        %endif
    %else
        %xdefine SUFFIX
        %undef cpuname
        %undef cpuflags
    %endif
%endmacro

; merge mmx and sse*

%macro CAT_XDEFINE 3
    %xdefine %1%2 %3
%endmacro

%macro CAT_UNDEF 2
    %undef %1%2
%endmacro

%macro INIT_MMX 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define mmsize 8
    %define num_mmregs 8
    %define mova movq
    %define movu movq
    %define movh movd
    %define movnta movntq
    %assign %%i 0
    %rep 8
    CAT_XDEFINE m, %%i, mm %+ %%i
    CAT_XDEFINE nmm, %%i, %%i
    %assign %%i %%i+1
    %endrep
    %rep 8
    CAT_UNDEF m, %%i
    CAT_UNDEF nmm, %%i
    %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

%macro INIT_XMM 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define mmsize 16
    %define num_mmregs 8
    %if ARCH_X86_64
    %define num_mmregs 16
    %endif
    %define mova movdqa
    %define movu movdqu
    %define movh movq
    %define movnta movntdq
    %assign %%i 0
    %rep num_mmregs
    CAT_XDEFINE m, %%i, xmm %+ %%i
    CAT_XDEFINE nxmm, %%i, %%i
    %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%macro INIT_AVX 0
    INIT_XMM
    %assign avx_enabled 1
    %define PALIGNR PALIGNR_SSSE3
    %define RESET_MM_PERMUTATION INIT_AVX
%endmacro

%macro INIT_YMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define mmsize 32
    %define num_mmregs 8
    %if ARCH_X86_64
    %define num_mmregs 16
    %endif
    %define mova vmovaps
    %define movu vmovups
    %undef movh
    %define movnta vmovntps
    %assign %%i 0
    %rep num_mmregs
    CAT_XDEFINE m, %%i, ymm %+ %%i
    CAT_XDEFINE nymm, %%i, %%i
    %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

INIT_XMM
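; Usage sketch: per-ISA versions of a function are typically built by
; re-assembling the same body under different modes, e.g.
;     INIT_XMM sse2
;     ; ... functions assembled here get the _sse2 suffix ...
;     INIT_XMM avx
;     ; ... identical bodies now get the _avx suffix and v-prefixed opcodes ...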

; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.

%macro PERMUTE 2-* ; takes a list of pairs to swap
%rep %0/2
    %xdefine tmp%2 m%2
    %xdefine ntmp%2 nm%2
    %rotate 2
%endrep
%rep %0/2
    %xdefine m%1 tmp%2
    %xdefine nm%1 ntmp%2
    %undef tmp%2
    %undef ntmp%2
    %rotate 2
%endrep
%endmacro

%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
%rep %0-1
%ifdef m%1
    %xdefine tmp m%1
    %xdefine m%1 m%2
    %xdefine m%2 tmp
    CAT_XDEFINE n, m%1, %1
    CAT_XDEFINE n, m%2, %2
%else
    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
    ; Be careful using this mode in nested macros though, as in some cases there may be
    ; other copies of m# that have already been dereferenced and don't get updated correctly.
    %xdefine %%n1 n %+ %1
    %xdefine %%n2 n %+ %2
    %xdefine tmp m %+ %%n1
    CAT_XDEFINE m, %%n1, m %+ %%n2
    CAT_XDEFINE m, %%n2, tmp
    CAT_XDEFINE n, m %+ %%n1, %%n1
    CAT_XDEFINE n, m %+ %%n2, %%n2
%endif
    %undef tmp
    %rotate 1
%endrep
%endmacro

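; Usage sketch: after a butterfly you can rename registers instead of moving
; data, e.g.
;     SWAP 0, 1      ; m0 and m1 exchange names; no instructions are emitted
;     SWAP m2, m3    ; same, with the numbers inferred from the names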
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
    %if %0
        %xdefine %%f %1_m
    %else
        %xdefine %%f current_function %+ _m
    %endif
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE %%f, %%i, m %+ %%i
    %assign %%i %%i+1
    %endrep
%endmacro

%macro LOAD_MM_PERMUTATION 1 ; name to load from
    %ifdef %1_m0
        %assign %%i 0
        %rep num_mmregs
            CAT_XDEFINE m, %%i, %1_m %+ %%i
            CAT_XDEFINE n, m %+ %%i, %%i
        %assign %%i %%i+1
        %endrep
    %endif
%endmacro

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
    call_internal %1, %1 %+ SUFFIX
%endmacro
%macro call_internal 2
    %xdefine %%i %1
    %ifndef cglobaled_%1
        %ifdef cglobaled_%2
            %xdefine %%i %2
        %endif
    %endif
    call %%i
    LOAD_MM_PERMUTATION %%i
%endmacro

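; Usage sketch (hypothetical helper): a callee declared with cglobal records
; its final register names so callers can pick them up after "call":
;     cglobal some_helper
;         SWAP 0, 1               ; the result is now under the name m0
;         SAVE_MM_PERMUTATION     ; saved under the helper's name
;         ret
;     ...
;     call some_helper            ; this macro reloads the saved permutation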
; Substitutions that reduce instruction size but are functionally equivalent
; (e.g. the immediate +128 needs 4 bytes, but -128 fits in 1, so "add x, 128"
; is smaller when encoded as "sub x, -128")
%macro add 2
    %ifnum %2
        %if %2==128
            sub %1, -128
        %else
            add %1, %2
        %endif
    %else
        add %1, %2
    %endif
%endmacro

%macro sub 2
    %ifnum %2
        %if %2==128
            add %1, -128
        %else
            sub %1, %2
        %endif
    %else
        sub %1, %2
    %endif
%endmacro

;=============================================================================
; AVX abstraction layer
;=============================================================================

%assign i 0
%rep 16
    %if i < 8
        CAT_XDEFINE sizeofmm, i, 8
    %endif
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
%assign i i+1
%endrep
%undef i

%macro CHECK_AVX_INSTR_EMU 3-*
    %xdefine %%opcode %1
    %xdefine %%dst %2
    %rep %0-2
        %ifidn %%dst, %3
            %error non-avx emulation of ``%%opcode'' is not supported
        %endif
        %rotate 1
    %endrep
%endmacro

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == number of operands given
;%5+: operands
%macro RUN_AVX_INSTR 6-7+
    %ifid %6
        %define %%sizeofreg sizeof%6
    %elifid %5
        %define %%sizeofreg sizeof%5
    %else
        %define %%sizeofreg mmsize
    %endif
    %if %%sizeofreg==32
        %if %4>=3
            v%1 %5, %6, %7
        %else
            v%1 %5, %6
        %endif
    %else
        %if %%sizeofreg==8
            %define %%regmov movq
        %elif %2
            %define %%regmov movaps
        %else
            %define %%regmov movdqa
        %endif

        %if %4>=3+%3
            %ifnidn %5, %6
                %if avx_enabled && %%sizeofreg==16
                    v%1 %5, %6, %7
                %else
                    CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7
                    %%regmov %5, %6
                    %1 %5, %7
                %endif
            %else
                %1 %5, %7
            %endif
        %elif %4>=3
            %1 %5, %6, %7
        %else
            %1 %5, %6
        %endif
    %endif
%endmacro

; 3arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
%macro RUN_AVX_INSTR1 8
    %assign %%swap 0
    %if avx_enabled
        %ifnid %6
            %assign %%swap 1
        %endif
    %elifnidn %5, %6
        %ifnid %7
            %assign %%swap 1
        %endif
    %endif
    %if %%swap && %3 == 0 && %8 == 1
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
    %else
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
    %endif
%endmacro

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 4
    %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
        %ifidn %3, fnord
            RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
        %elifidn %4, fnord
            RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
        %elifidn %5, fnord
            RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
        %else
            RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
        %endif
    %endmacro
%endmacro

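; Once the AVX_INSTR definitions below are in effect, 3-operand syntax works
; in both modes, e.g.
;     paddw m0, m1, m2   ; AVX:     vpaddw xmm0, xmm1, xmm2
;                        ; pre-AVX: movdqa xmm0, xmm1 + paddw xmm0, xmm2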
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 0, 0
AVX_INSTR cmpps, 1, 0, 0
AVX_INSTR cmpsd, 1, 0, 0
AVX_INSTR cmpss, 1, 0, 0
AVX_INSTR cvtdq2ps, 1, 0, 0
AVX_INSTR cvtps2dq, 1, 0, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR pabsb, 0, 0, 0
AVX_INSTR pabsw, 0, 0, 0
AVX_INSTR pabsd, 0, 0, 0
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pcmpestri, 0, 0, 0
AVX_INSTR pcmpestrm, 0, 0, 0
AVX_INSTR pcmpistri, 0, 0, 0
AVX_INSTR pcmpistrm, 0, 0, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmovmskb, 0, 0, 0
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR pshufd, 0, 1, 0
AVX_INSTR pshufhw, 0, 1, 0
AVX_INSTR pshuflw, 0, 1, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR ptest, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1

; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1

; base-4 constants for shuffles
%assign i 0
%rep 256
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
    %if j < 10
        CAT_XDEFINE q000, j, i
    %elif j < 100
        CAT_XDEFINE q00, j, i
    %elif j < 1000
        CAT_XDEFINE q0, j, i
    %else
        CAT_XDEFINE q, j, i
    %endif
%assign i i+1
%endrep
%undef i
%undef j

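; e.g. q3120 assembles to the immediate 11011000b, so
;     pshufd m0, m1, q3120   ; picks source dwords 0,2,1,3 (low to high)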
%macro FMA_INSTR 3
    %macro %1 5-8 %1, %2, %3
        %if cpuflag(xop) || cpuflag(fma4)
            v%6 %1, %2, %3, %4
        %else
            %ifidn %1, %4
                %7 %5, %2, %3
                %8 %1, %4, %5
            %else
                %7 %1, %2, %3
                %8 %1, %4
            %endif
        %endif
    %endmacro
%endmacro

FMA_INSTR  fmaddps,   mulps, addps
FMA_INSTR  pmacsdd,  pmulld, paddd
FMA_INSTR  pmacsww,  pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd

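; Usage sketch (hypothetical registers): compute m0 = m1*m2 + m0 with m3 as
; scratch (needed because dst aliases the addend); XOP/FMA4 builds emit
; vfmaddps instead of the mulps+addps pair:
;     fmaddps m0, m1, m2, m0, m3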
; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf