;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license and patent
;  grant that can be found in the LICENSE file in the root of the source
;  tree. All contributing project authors may be found in the AUTHORS
;  file in the root of the source tree.
;


%include "vpx_ports/x86_abi_support.asm"
%include "asm_enc_offsets.asm"

; void vp8_regular_quantize_b_sse2 | arg
;  (BLOCK  *b,                     |  0
;   BLOCKD *d)                     |  1
;
; Regular (zbin + zero-run boost) quantizer for one block of 16 short
; coefficients.  Reads coeff/zbin/round/quant/quant_shift/zrun_zbin_boost
; through BLOCK field offsets and writes qcoeff/dqcoeff/eob through BLOCKD
; field offsets; both sets of offsets come from asm_enc_offsets.asm.
; ABI helper macros (SAVE_XMM, GET_GOT, arg, GLOBAL, ALIGN_STACK) come from
; vpx_ports/x86_abi_support.asm.

global sym(vp8_regular_quantize_b_sse2)
sym(vp8_regular_quantize_b_sse2):
    push        rbp
    mov         rbp, rsp
    SAVE_XMM
    GET_GOT     rbx
    push        rsi

%if ABI_IS_32BIT
    push        rdi
%else
  %ifidn __OUTPUT_FORMAT__,x64
    push        rdi
  %endif
%endif

    ALIGN_STACK 16, rax

    ; Local stack frame layout (offsets from rsp, sizes in the comments):
    %define BLOCKD_d          0  ;  8
    %define zrun_zbin_boost   8  ;  8
    %define abs_minus_zbin    16 ; 32
    %define temp_qcoeff       48 ; 32
    %define qcoeff            80 ; 32
    %define stack_size        112
    sub         rsp, stack_size
    ; end prolog

    ; Load BLOCK *b into rdi and stash BLOCKD *d for use after the scan loop.
    ; On 32-bit, args come off the stack; on 64-bit they arrive in registers
    ; (rcx/rdx on Win64, rdi/rsi on SysV).
%if ABI_IS_32BIT
    mov         rdi, arg(0)
%else
  %ifidn __OUTPUT_FORMAT__,x64
    mov         rdi, rcx                    ; BLOCK *b
    mov         [rsp + BLOCKD_d], rdx
  %else
    ;mov         rdi, rdi                    ; BLOCK *b
    mov         [rsp + BLOCKD_d], rsi
  %endif
%endif

    mov         rdx, [rdi + vp8_block_coeff] ; coeff_ptr
    mov         rcx, [rdi + vp8_block_zbin] ; zbin_ptr
    movd        xmm7, [rdi + vp8_block_zbin_extra] ; zbin_oq_value

    ; z
    movdqa      xmm0, [rdx]
    movdqa      xmm4, [rdx + 16]
    mov         rdx, [rdi + vp8_block_round] ; round_ptr

    pshuflw     xmm7, xmm7, 0
    punpcklwd   xmm7, xmm7                  ; duplicated zbin_oq_value

    movdqa      xmm1, xmm0
    movdqa      xmm5, xmm4

    ; sz
    psraw       xmm0, 15
    psraw       xmm4, 15

    ; (z ^ sz)
    pxor        xmm1, xmm0
    pxor        xmm5, xmm4

    ; x = abs(z)
    psubw       xmm1, xmm0
    psubw       xmm5, xmm4

    movdqa      xmm2, [rcx]
    movdqa      xmm3, [rcx + 16]
    mov         rcx, [rdi + vp8_block_quant] ; quant_ptr

    ; *zbin_ptr + zbin_oq_value
    paddw       xmm2, xmm7
    paddw       xmm3, xmm7

    ; x - (*zbin_ptr + zbin_oq_value)
    psubw       xmm1, xmm2
    psubw       xmm5, xmm3
    movdqa      [rsp + abs_minus_zbin], xmm1
    movdqa      [rsp + abs_minus_zbin + 16], xmm5

    ; add (zbin_ptr + zbin_oq_value) back
    paddw       xmm1, xmm2
    paddw       xmm5, xmm3

    movdqa      xmm2, [rdx]
    movdqa      xmm6, [rdx + 16]

    movdqa      xmm3, [rcx]
    movdqa      xmm7, [rcx + 16]

    ; x + round
    paddw       xmm1, xmm2
    paddw       xmm5, xmm6

    ; y = x * quant_ptr >> 16
    pmulhw      xmm3, xmm1
    pmulhw      xmm7, xmm5

    ; y += x
    paddw       xmm1, xmm3
    paddw       xmm5, xmm7

    movdqa      [rsp + temp_qcoeff], xmm1
    movdqa      [rsp + temp_qcoeff + 16], xmm5

    pxor        xmm6, xmm6
    ; zero qcoeff
    movdqa      [rsp + qcoeff], xmm6
    movdqa      [rsp + qcoeff + 16], xmm6

    mov         rsi, [rdi + vp8_block_zrun_zbin_boost] ; zbin_boost_ptr
    mov         rax, [rdi + vp8_block_quant_shift] ; quant_shift_ptr
    mov         [rsp + zrun_zbin_boost], rsi

; Scalar zig-zag pass: for coefficient %1 in scan order, keep the quantized
; value only if abs(z) clears the boosted zero-bin; on a kept coefficient the
; zero-run boost pointer is reset to the start of the boost table.
%macro ZIGZAG_LOOP 1
    movsx       edx, WORD PTR[GLOBAL(zig_zag) + (%1 * 2)] ; rc

    ; x
    movsx       ecx, WORD PTR[rsp + abs_minus_zbin + rdx *2]

    ; if (x >= zbin)
    sub         cx, WORD PTR[rsi]           ; x - zbin
    lea         rsi, [rsi + 2]              ; zbin_boost_ptr++
    jl          rq_zigzag_loop_%1           ; x < zbin

    movsx       edi, WORD PTR[rsp + temp_qcoeff + rdx *2]

    ; downshift by quant_shift[rdx]
    movsx       ecx, WORD PTR[rax + rdx*2]  ; quant_shift_ptr[rc]
    sar         edi, cl                     ; also sets Z bit
    je          rq_zigzag_loop_%1           ; !y
    mov         WORD PTR[rsp + qcoeff + rdx*2], di ;qcoeff_ptr[rc] = temp_qcoeff[rc]
    mov         rsi, [rsp + zrun_zbin_boost] ; reset to b->zrun_zbin_boost
rq_zigzag_loop_%1:
%endmacro
ZIGZAG_LOOP 0
ZIGZAG_LOOP 1
ZIGZAG_LOOP 2
ZIGZAG_LOOP 3
ZIGZAG_LOOP 4
ZIGZAG_LOOP 5
ZIGZAG_LOOP 6
ZIGZAG_LOOP 7
ZIGZAG_LOOP 8
ZIGZAG_LOOP 9
ZIGZAG_LOOP 10
ZIGZAG_LOOP 11
ZIGZAG_LOOP 12
ZIGZAG_LOOP 13
ZIGZAG_LOOP 14
ZIGZAG_LOOP 15

    movdqa      xmm2, [rsp + qcoeff]
    movdqa      xmm3, [rsp + qcoeff + 16]

    ; recover BLOCKD *d (stashed in the prologue on 64-bit)
%if ABI_IS_32BIT
    mov         rdi, arg(1)
%else
    mov         rdi, [rsp + BLOCKD_d]
%endif

    mov         rcx, [rdi + vp8_blockd_dequant] ; dequant_ptr
    mov         rsi, [rdi + vp8_blockd_dqcoeff] ; dqcoeff_ptr

    ; y ^ sz   (xmm0/xmm4 still hold the sign masks from the start)
    pxor        xmm2, xmm0
    pxor        xmm3, xmm4
    ; x = (y ^ sz) - sz
    psubw       xmm2, xmm0
    psubw       xmm3, xmm4

    ; dequant
    movdqa      xmm0, [rcx]
    movdqa      xmm1, [rcx + 16]

    mov         rcx, [rdi + vp8_blockd_qcoeff] ; qcoeff_ptr

    pmullw      xmm0, xmm2
    pmullw      xmm1, xmm3

    movdqa      [rcx], xmm2        ; store qcoeff
    movdqa      [rcx + 16], xmm3
    movdqa      [rsi], xmm0        ; store dqcoeff
    movdqa      [rsi + 16], xmm1

    ; select the last value (in zig_zag order) for EOB
    pcmpeqw     xmm2, xmm6
    pcmpeqw     xmm3, xmm6
    ; !
    pcmpeqw     xmm6, xmm6
    pxor        xmm2, xmm6
    pxor        xmm3, xmm6
    ; mask inv_zig_zag
    pand        xmm2, [GLOBAL(inv_zig_zag)]
    pand        xmm3, [GLOBAL(inv_zig_zag) + 16]
    ; select the max value
    pmaxsw      xmm2, xmm3
    pshufd      xmm3, xmm2, 00001110b
    pmaxsw      xmm2, xmm3
    pshuflw     xmm3, xmm2, 00001110b
    pmaxsw      xmm2, xmm3
    pshuflw     xmm3, xmm2, 00000001b
    pmaxsw      xmm2, xmm3
    movd        eax, xmm2
    and         eax, 0xff
    mov         [rdi + vp8_blockd_eob], eax

    ; begin epilog
    add         rsp, stack_size
    pop         rsp                         ; undo ALIGN_STACK
%if ABI_IS_32BIT
    pop         rdi
%else
  %ifidn __OUTPUT_FORMAT__,x64
    pop         rdi
  %endif
%endif
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    pop         rbp
    ret

; int vp8_fast_quantize_b_impl_sse2 | arg
;  (short *coeff_ptr,               |  0
;   short *qcoeff_ptr,              |  1
;   short *dequant_ptr,             |  2
;   short *inv_scan_order,          |  3
;   short *round_ptr,               |  4
;   short *quant_ptr,               |  5
;   short *dqcoeff_ptr)             |  6
;
; Fast quantizer (no zero-bin / boost): qcoeff = sign(z) * ((abs(z) +
; round) * quant >> 16), dqcoeff = qcoeff * dequant, all 16 coefficients
; in two 8-wide SSE2 halves.  Returns the end-of-block index in eax,
; computed as the max inv_scan_order entry over nonzero qcoeffs.
; All pointers are assumed 16-byte aligned (movdqa accesses).

global sym(vp8_fast_quantize_b_impl_sse2)
sym(vp8_fast_quantize_b_impl_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 7
    push        rsi
    push        rdi
    ; end prolog

    mov         rdx, arg(0)                 ;coeff_ptr
    mov         rcx, arg(2)                 ;dequant_ptr
    mov         rdi, arg(4)                 ;round_ptr
    mov         rsi, arg(5)                 ;quant_ptr

    movdqa      xmm0, XMMWORD PTR[rdx]
    movdqa      xmm4, XMMWORD PTR[rdx + 16]

    movdqa      xmm2, XMMWORD PTR[rdi]      ;round lo
    movdqa      xmm3, XMMWORD PTR[rdi + 16] ;round hi

    movdqa      xmm1, xmm0
    movdqa      xmm5, xmm4

    psraw       xmm0, 15                    ;sign of z (aka sz)
    psraw       xmm4, 15                    ;sign of z (aka sz)

    pxor        xmm1, xmm0
    pxor        xmm5, xmm4
    psubw       xmm1, xmm0                  ;x = abs(z)
    psubw       xmm5, xmm4                  ;x = abs(z)

    ; x + round
    paddw       xmm1, xmm2
    paddw       xmm5, xmm3

    ; y = (x + round) * quant >> 16
    pmulhw      xmm1, XMMWORD PTR[rsi]
    pmulhw      xmm5, XMMWORD PTR[rsi + 16]

    mov         rdi, arg(1)                 ;qcoeff_ptr
    mov         rsi, arg(6)                 ;dqcoeff_ptr

    movdqa      xmm2, XMMWORD PTR[rcx]
    movdqa      xmm3, XMMWORD PTR[rcx + 16]

    ; restore the sign: x = (y ^ sz) - sz
    pxor        xmm1, xmm0
    pxor        xmm5, xmm4
    psubw       xmm1, xmm0
    psubw       xmm5, xmm4

    movdqa      XMMWORD PTR[rdi], xmm1      ;store qcoeff
    movdqa      XMMWORD PTR[rdi + 16], xmm5

    ; dqcoeff = qcoeff * dequant
    pmullw      xmm2, xmm1
    pmullw      xmm3, xmm5

    mov         rdi, arg(3)                 ;inv_scan_order

    ; EOB: build a nonzero mask, apply inv_scan_order, then a pmaxsw
    ; reduction 16 -> 8 -> 4 -> 2 -> 1.
    ; Start with 16
    pxor        xmm4, xmm4                  ;clear all bits
    pcmpeqw     xmm1, xmm4
    pcmpeqw     xmm5, xmm4

    pcmpeqw     xmm4, xmm4                  ;set all bits
    pxor        xmm1, xmm4
    pxor        xmm5, xmm4

    pand        xmm1, XMMWORD PTR[rdi]
    pand        xmm5, XMMWORD PTR[rdi+16]

    pmaxsw      xmm1, xmm5

    ; now down to 8
    pshufd      xmm5, xmm1, 00001110b

    pmaxsw      xmm1, xmm5

    ; only 4 left
    pshuflw     xmm5, xmm1, 00001110b

    pmaxsw      xmm1, xmm5

    ; okay, just 2!
    pshuflw     xmm5, xmm1, 00000001b

    pmaxsw      xmm1, xmm5

    ; MOVD's destination is a 32-bit register (the old "movd rax" form is a
    ; non-standard encoding); writing eax zero-extends into rax, so the int
    ; return value is unchanged.
    movd        eax, xmm1
    and         eax, 0xff

    movdqa      XMMWORD PTR[rsi], xmm2        ;store dqcoeff
    movdqa      XMMWORD PTR[rsi + 16], xmm3   ;store dqcoeff

    ; begin epilog
    pop         rdi
    pop         rsi
    UNSHADOW_ARGS
    pop         rbp
    ret

SECTION_RODATA
align 16
; zig_zag[i]: raster index of the i-th coefficient of a 4x4 block in
; zig-zag scan order (consumed by ZIGZAG_LOOP above).
zig_zag:
  dw 0x0000, 0x0001, 0x0004, 0x0008
  dw 0x0005, 0x0002, 0x0003, 0x0006
  dw 0x0009, 0x000c, 0x000d, 0x000a
  dw 0x0007, 0x000b, 0x000e, 0x000f
; inv_zig_zag[r]: 1-based scan position of raster index r.  1-based so a
; masked pmaxsw reduction over nonzero coefficients yields the EOB count
; directly (0 when every coefficient is zero).
inv_zig_zag:
  dw 0x0001, 0x0002, 0x0006, 0x0007
  dw 0x0003, 0x0005, 0x0008, 0x000d
  dw 0x0004, 0x0009, 0x000c, 0x000e
  dw 0x000a, 0x000b, 0x000f, 0x0010