Commit 71379b87 authored by John Stark, committed by James Zern

Changes to assembler for NASM on mac.

fixes non-Apple nasm part of issue #755

Change-Id: I11955d270c4ee55e3c00e99f568de01b95e7ea9a
parent b87c51ce
...@@ -617,9 +617,17 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 ...@@ -617,9 +617,17 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%elifidn __OUTPUT_FORMAT__,elf64 %elifidn __OUTPUT_FORMAT__,elf64
global %1:function hidden global %1:function hidden
%elifidn __OUTPUT_FORMAT__,macho32 %elifidn __OUTPUT_FORMAT__,macho32
global %1:private_extern %ifdef __NASM_VER__
global %1
%else
global %1:private_extern
%endif
%elifidn __OUTPUT_FORMAT__,macho64 %elifidn __OUTPUT_FORMAT__,macho64
global %1:private_extern %ifdef __NASM_VER__
global %1
%else
global %1:private_extern
%endif
%else %else
global %1 global %1
%endif %endif
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
mov rcx, 0x0400040 mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters movdqa xmm4, [rdx] ;load filters
movd xmm5, rcx movq xmm5, rcx
packsswb xmm4, xmm4 packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1 pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3 pshuflw xmm1, xmm4, 01010101b ;k2_k3
...@@ -661,7 +661,7 @@ sym(vp9_filter_block1d16_v8_avg_ssse3): ...@@ -661,7 +661,7 @@ sym(vp9_filter_block1d16_v8_avg_ssse3):
mov rcx, 0x0400040 mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters movdqa xmm4, [rdx] ;load filters
movd xmm5, rcx movq xmm5, rcx
packsswb xmm4, xmm4 packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1 pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3 pshuflw xmm1, xmm4, 01010101b ;k2_k3
......
...@@ -122,8 +122,8 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \ ...@@ -122,8 +122,8 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
%ifidn %1, b_32x32 %ifidn %1, b_32x32
pmovmskb r6, m7 pmovmskb r6d, m7
pmovmskb r2, m12 pmovmskb r2d, m12
or r6, r2 or r6, r2
jz .skip_iter jz .skip_iter
%endif %endif
...@@ -308,8 +308,8 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \ ...@@ -308,8 +308,8 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
%ifidn %1, fp_32x32 %ifidn %1, fp_32x32
pcmpgtw m7, m6, m0 pcmpgtw m7, m6, m0
pcmpgtw m12, m11, m0 pcmpgtw m12, m11, m0
pmovmskb r6, m7 pmovmskb r6d, m7
pmovmskb r2, m12 pmovmskb r2d, m12
or r6, r2 or r6, r2
jz .skip_iter jz .skip_iter
......
...@@ -101,7 +101,7 @@ SECTION .text ...@@ -101,7 +101,7 @@ SECTION .text
pshufd m4, m6, 0x1 pshufd m4, m6, 0x1
movd [r1], m7 ; store sse movd [r1], m7 ; store sse
paddd m6, m4 paddd m6, m4
movd rax, m6 ; store sum as return value movd raxd, m6 ; store sum as return value
%else ; mmsize == 8 %else ; mmsize == 8
pshufw m4, m6, 0xe pshufw m4, m6, 0xe
pshufw m3, m7, 0xe pshufw m3, m7, 0xe
...@@ -113,7 +113,7 @@ SECTION .text ...@@ -113,7 +113,7 @@ SECTION .text
movd [r1], m7 ; store sse movd [r1], m7 ; store sse
pshufw m4, m6, 0xe pshufw m4, m6, 0xe
paddd m6, m4 paddd m6, m4
movd rax, m6 ; store sum as return value movd raxd, m6 ; store sum as return value
%endif %endif
RET RET
%endmacro %endmacro
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment