Commit 6ad1fa5a authored by Bernhard Rosenkränzer, committed by Michael Niedermayer

Better ARM support for mplayer/ffmpeg, ported from atty fork

While playing with some new hardware, I found it's running a forked mplayer
-- and it looks like they're following the GPL.

The maintainer's page is here: http://atty.jp/?Zaurus/mplayer
Unfortunately it's mostly in Japanese, so it's hard to figure out any
details.

Their code looks quite interesting (at least to those of us with ARM CPUs).

The patches I've attached are the patches from atty.jp, with a couple of
modifications by myself:
- ported to current CVS
- reverted their change of removing SNOW support from ffmpeg
- cleaned up their bswap mess
- removed DOS-style line breaks from various files

patch by (Bernhard Rosenkraenzer: bero, arklinux org)

Originally committed as revision 4311 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent c66a4434
@@ -316,8 +316,11 @@ endif
# armv4l specific stuff
ifeq ($(TARGET_ARCH_ARMV4L),yes)
ASM_OBJS += armv4l/jrevdct_arm.o armv4l/simple_idct_arm.o
ASM_OBJS += armv4l/jrevdct_arm.o armv4l/simple_idct_arm.o armv4l/dsputil_arm_s.o
OBJS += armv4l/dsputil_arm.o armv4l/mpegvideo_arm.o
ifeq ($(TARGET_IWMMXT),yes)
OBJS += armv4l/dsputil_iwmmxt.o armv4l/mpegvideo_iwmmxt.o
endif
endif
# sun mediaLib specific stuff
@@ -327,6 +330,12 @@ OBJS += mlib/dsputil_mlib.o
CFLAGS += $(MLIB_INC)
endif
# Intel IPP specific stuff
# currently only works when libavcodec is used in mplayer
ifeq ($(HAVE_IPP),yes)
CFLAGS += $(IPP_INC)
endif
# alpha specific stuff
ifeq ($(TARGET_ARCH_ALPHA),yes)
OBJS += alpha/dsputil_alpha.o alpha/mpegvideo_alpha.o \
@@ -18,6 +18,13 @@
*/
#include "../dsputil.h"
#ifdef HAVE_IPP
#include "ipp.h"
#endif
#ifdef HAVE_IWMMXT
extern void dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx);
#endif
extern void j_rev_dct_ARM(DCTELEM *data);
extern void simple_idct_ARM(DCTELEM *data);
@@ -26,6 +33,146 @@ extern void simple_idct_ARM(DCTELEM *data);
static void (*ff_put_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
static void (*ff_add_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
void put_pixels8_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
void put_pixels8_x2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
void put_pixels8_y2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
void put_pixels8_xy2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
void put_no_rnd_pixels8_x2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
void put_no_rnd_pixels8_y2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
void put_no_rnd_pixels8_xy2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
void put_pixels16_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h);
static void put_pixels16_x2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
put_pixels8_x2_arm(block, pixels, line_size, h);
put_pixels8_x2_arm(block + 8, pixels + 8, line_size, h);
}
static void put_pixels16_y2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
put_pixels8_y2_arm(block, pixels, line_size, h);
put_pixels8_y2_arm(block + 8, pixels + 8, line_size, h);
}
static void put_pixels16_xy2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
put_pixels8_xy2_arm(block, pixels, line_size, h);
put_pixels8_xy2_arm(block + 8, pixels + 8, line_size, h);
}
static void put_no_rnd_pixels16_x2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
put_no_rnd_pixels8_x2_arm(block, pixels, line_size, h);
put_no_rnd_pixels8_x2_arm(block + 8, pixels + 8, line_size, h);
}
static void put_no_rnd_pixels16_y2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
put_no_rnd_pixels8_y2_arm(block, pixels, line_size, h);
put_no_rnd_pixels8_y2_arm(block + 8, pixels + 8, line_size, h);
}
static void put_no_rnd_pixels16_xy2_arm(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
put_no_rnd_pixels8_xy2_arm(block, pixels, line_size, h);
put_no_rnd_pixels8_xy2_arm(block + 8, pixels + 8, line_size, h);
}
static void add_pixels_clamped_ARM(short *block, unsigned char *dest, int line_size)
{
asm volatile (
"mov r10, #8 \n\t"
"1: \n\t"
/* load dest */
"ldr r4, [%1] \n\t"
/* block[0] and block[1]*/
"ldrsh r5, [%0] \n\t"
"ldrsh r7, [%0, #2] \n\t"
"and r6, r4, #0xFF \n\t"
"and r8, r4, #0xFF00 \n\t"
"add r6, r5, r6 \n\t"
"add r8, r7, r8, lsr #8 \n\t"
"mvn r5, r5 \n\t"
"mvn r7, r7 \n\t"
"tst r6, #0x100 \n\t"
"movne r6, r5, lsr #24 \n\t"
"tst r8, #0x100 \n\t"
"movne r8, r7, lsr #24 \n\t"
"mov r9, r6 \n\t"
"ldrsh r5, [%0, #4] \n\t" /* moved form [A] */
"orr r9, r9, r8, lsl #8 \n\t"
/* block[2] and block[3] */
/* [A] */
"ldrsh r7, [%0, #6] \n\t"
"and r6, r4, #0xFF0000 \n\t"
"and r8, r4, #0xFF000000 \n\t"
"add r6, r5, r6, lsr #16 \n\t"
"add r8, r7, r8, lsr #24 \n\t"
"mvn r5, r5 \n\t"
"mvn r7, r7 \n\t"
"tst r6, #0x100 \n\t"
"movne r6, r5, lsr #24 \n\t"
"tst r8, #0x100 \n\t"
"movne r8, r7, lsr #24 \n\t"
"orr r9, r9, r6, lsl #16 \n\t"
"ldr r4, [%1, #4] \n\t" /* moved form [B] */
"orr r9, r9, r8, lsl #24 \n\t"
/* store dest */
"ldrsh r5, [%0, #8] \n\t" /* moved form [C] */
"str r9, [%1] \n\t"
/* load dest */
/* [B] */
/* block[4] and block[5] */
/* [C] */
"ldrsh r7, [%0, #10] \n\t"
"and r6, r4, #0xFF \n\t"
"and r8, r4, #0xFF00 \n\t"
"add r6, r5, r6 \n\t"
"add r8, r7, r8, lsr #8 \n\t"
"mvn r5, r5 \n\t"
"mvn r7, r7 \n\t"
"tst r6, #0x100 \n\t"
"movne r6, r5, lsr #24 \n\t"
"tst r8, #0x100 \n\t"
"movne r8, r7, lsr #24 \n\t"
"mov r9, r6 \n\t"
"ldrsh r5, [%0, #12] \n\t" /* moved from [D] */
"orr r9, r9, r8, lsl #8 \n\t"
/* block[6] and block[7] */
/* [D] */
"ldrsh r7, [%0, #14] \n\t"
"and r6, r4, #0xFF0000 \n\t"
"and r8, r4, #0xFF000000 \n\t"
"add r6, r5, r6, lsr #16 \n\t"
"add r8, r7, r8, lsr #24 \n\t"
"mvn r5, r5 \n\t"
"mvn r7, r7 \n\t"
"tst r6, #0x100 \n\t"
"movne r6, r5, lsr #24 \n\t"
"tst r8, #0x100 \n\t"
"movne r8, r7, lsr #24 \n\t"
"orr r9, r9, r6, lsl #16 \n\t"
"add %0, %0, #16 \n\t" /* moved from [E] */
"orr r9, r9, r8, lsl #24 \n\t"
"subs r10, r10, #1 \n\t" /* moved from [F] */
/* store dest */
"str r9, [%1, #4] \n\t"
/* [E] */
/* [F] */
"add %1, %1, %2 \n\t"
"bne 1b \n\t"
:
: "r"(block),
"r"(dest),
"r"(line_size)
: "r4", "r5", "r6", "r7", "r8", "r9", "r10", "cc", "memory" );
}
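/* Comment added for clarity, not part of the original patch: a plain C sketch
 * of what the assembly above computes.  Each signed 16-bit coefficient is
 * added to the corresponding destination byte and the sum is clamped to
 * 0..255; the "tst #0x100 / movne ..., ~block >> 24" pair is the branchless
 * clamp (it yields 0xFF when the coefficient was positive and the sum
 * overflowed, 0x00 when it was negative and the sum underflowed).
 *
 *     for (int i = 0; i < 8; i++) {
 *         for (int j = 0; j < 8; j++) {
 *             int v = block[8 * i + j] + dest[j];
 *             dest[j] = v < 0 ? 0 : (v > 255 ? 255 : v);
 *         }
 *         dest += line_size;
 *     }
 */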
/* XXX: those functions should be suppressed ASAP when all IDCTs are
converted */
static void j_rev_dct_ARM_put(uint8_t *dest, int line_size, DCTELEM *block)
@@ -48,6 +195,34 @@ static void simple_idct_ARM_add(uint8_t *dest, int line_size, DCTELEM *block)
simple_idct_ARM (block);
ff_add_pixels_clamped(block, dest, line_size);
}
static void simple_idct_ipp(DCTELEM *block)
{
#ifdef HAVE_IPP
ippiDCT8x8Inv_Video_16s_C1I(block);
#endif
}
static void simple_idct_ipp_put(uint8_t *dest, int line_size, DCTELEM *block)
{
#ifdef HAVE_IPP
ippiDCT8x8Inv_Video_16s8u_C1R(block, dest, line_size);
#endif
}
#ifdef HAVE_IWMMXT
void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_size);
#endif
static void simple_idct_ipp_add(uint8_t *dest, int line_size, DCTELEM *block)
{
#ifdef HAVE_IPP
ippiDCT8x8Inv_Video_16s_C1I(block);
#ifdef HAVE_IWMMXT
add_pixels_clamped_iwmmxt(block, dest, line_size);
#else
add_pixels_clamped_ARM(block, dest, line_size);
#endif
#endif
}
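/* Comment added for clarity, not part of the original patch: when HAVE_IPP is
 * not defined, the three simple_idct_ipp* wrappers above compile to empty
 * functions.  They are only registered below when FF_IDCT_IPP is explicitly
 * requested, so this is harmless, but it means requesting the IPP IDCT on a
 * non-IPP build silently produces no output. */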
void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx)
{
@@ -56,7 +231,11 @@ void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx)
ff_put_pixels_clamped = c->put_pixels_clamped;
ff_add_pixels_clamped = c->add_pixels_clamped;
#ifdef HAVE_IPP
if(idct_algo==FF_IDCT_ARM){
#else
if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_ARM){
#endif
c->idct_put= j_rev_dct_ARM_put;
c->idct_add= j_rev_dct_ARM_add;
c->idct = j_rev_dct_ARM;
@@ -66,5 +245,37 @@ void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx)
c->idct_add= simple_idct_ARM_add;
c->idct = simple_idct_ARM;
c->idct_permutation_type= FF_NO_IDCT_PERM;
#ifdef HAVE_IPP
} else if (idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_IPP){
#else
} else if (idct_algo==FF_IDCT_IPP){
#endif
c->idct_put= simple_idct_ipp_put;
c->idct_add= simple_idct_ipp_add;
c->idct = simple_idct_ipp;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}
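/* Comment added for clarity, not part of the original patch: the twin
 * #ifdef HAVE_IPP blocks above make FF_IDCT_AUTO resolve to the IPP IDCT when
 * IPP support is compiled in, and to the ARM JPEG IDCT (j_rev_dct_ARM)
 * otherwise; FF_IDCT_ARM and FF_IDCT_IPP always select their named
 * implementation. */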
/* c->put_pixels_tab[0][0] = put_pixels16_arm; */ // NG!
c->put_pixels_tab[0][1] = put_pixels16_x2_arm; //OK!
c->put_pixels_tab[0][2] = put_pixels16_y2_arm; //OK!
/* c->put_pixels_tab[0][3] = put_pixels16_xy2_arm; /\* NG *\/ */
/* c->put_no_rnd_pixels_tab[0][0] = put_pixels16_arm; // ? */
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_arm; // OK
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_arm; //OK
/* c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_arm; //NG */
c->put_pixels_tab[1][0] = put_pixels8_arm; //OK
c->put_pixels_tab[1][1] = put_pixels8_x2_arm; //OK
/* c->put_pixels_tab[1][2] = put_pixels8_y2_arm; //NG */
/* c->put_pixels_tab[1][3] = put_pixels8_xy2_arm; //NG */
c->put_no_rnd_pixels_tab[1][0] = put_pixels8_arm;//OK
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_arm; //OK
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_arm; //OK
/* c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_arm;//NG */
#if 1
#ifdef HAVE_IWMMXT
dsputil_init_iwmmxt(c, avctx);
#endif
#endif
}
@
@ ARMv4L optimized DSP utils
@ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
@
@ This library is free software; you can redistribute it and/or
@ modify it under the terms of the GNU Lesser General Public
@ License as published by the Free Software Foundation; either
@ version 2 of the License, or (at your option) any later version.
@
@ This library is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
@ Lesser General Public License for more details.
@
@ You should have received a copy of the GNU Lesser General Public
@ License along with this library; if not, write to the Free Software
@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@
.macro ADJ_ALIGN_QUADWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
mov \Rd0, \Rn0, lsr #(\shift * 8)
mov \Rd1, \Rn1, lsr #(\shift * 8)
mov \Rd2, \Rn2, lsr #(\shift * 8)
mov \Rd3, \Rn3, lsr #(\shift * 8)
orr \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8)
orr \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8)
orr \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8)
orr \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8)
.endm
.macro ADJ_ALIGN_DOUBLEWORD shift, R0, R1, R2
mov \R0, \R0, lsr #(\shift * 8)
orr \R0, \R0, \R1, lsl #(32 - \shift * 8)
mov \R1, \R1, lsr #(\shift * 8)
orr \R1, \R1, \R2, lsl #(32 - \shift * 8)
.endm
.macro ADJ_ALIGN_DOUBLEWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2
mov \Rdst0, \Rsrc0, lsr #(\shift * 8)
mov \Rdst1, \Rsrc1, lsr #(\shift * 8)
orr \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8))
orr \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8))
.endm
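@ Comment added for clarity, not part of the original patch: the ADJ_ALIGN_*
@ macros emulate an unaligned load on ARMv4, which has no unaligned access.
@ The caller rounds the source pointer down to a word boundary, loads one
@ extra word, and each output word is rebuilt as, in C terms (little-endian):
@     dst[i] = (src[i] >> (shift * 8)) | (src[i + 1] << (32 - shift * 8));
@ where shift = original_pointer & 3.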
.macro RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
@ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
@ Rmask = 0xFEFEFEFE
@ Rn = destroy
eor \Rd0, \Rn0, \Rm0
eor \Rd1, \Rn1, \Rm1
orr \Rn0, \Rn0, \Rm0
orr \Rn1, \Rn1, \Rm1
and \Rd0, \Rd0, \Rmask
and \Rd1, \Rd1, \Rmask
sub \Rd0, \Rn0, \Rd0, lsr #1
sub \Rd1, \Rn1, \Rd1, lsr #1
.endm
.macro NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
@ Rd = (Rn & Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
@ Rmask = 0xFEFEFEFE
@ Rn = destroy
eor \Rd0, \Rn0, \Rm0
eor \Rd1, \Rn1, \Rm1
and \Rn0, \Rn0, \Rm0
and \Rn1, \Rn1, \Rm1
and \Rd0, \Rd0, \Rmask
and \Rd1, \Rd1, \Rmask
add \Rd0, \Rn0, \Rd0, lsr #1
add \Rd1, \Rn1, \Rd1, lsr #1
.endm
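@ Comment added for clarity, not part of the original patch: both macros are
@ the classic packed-byte (SWAR) average over two 32-bit words.  In C terms,
@ with helper names chosen to mirror the macro names:
@     rnd_avg32(a, b)    = (a | b) - (((a ^ b) & 0xFEFEFEFE) >> 1)   per byte: (a + b + 1) >> 1
@     no_rnd_avg32(a, b) = (a & b) + (((a ^ b) & 0xFEFEFEFE) >> 1)   per byte: (a + b) >> 1
@ Masking with 0xFEFEFEFE clears each byte's low bit before the shift so that
@ carries cannot leak across byte boundaries.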
@ ----------------------------------------------------------------
.align 8
.global put_pixels16_arm
put_pixels16_arm:
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
pld [r1]
stmfd sp!, {r4-r11, lr} @ R14 is also called LR
adr r5, 5f
ands r4, r1, #3
bic r1, r1, #3
add r5, r5, r4, lsl #2
ldrne pc, [r5]
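@ Comment added for clarity, not part of the original patch: the preamble
@ above dispatches on the source alignment.  r4 = pixels & 3 selects one of
@ four copy loops; the word-aligned case (r4 == 0) falls through to 1:, while
@ the misaligned cases jump through the table at 5: (to 2:, 3: or 4:), each of
@ which loads one extra word per row and realigns it with ADJ_ALIGN_QUADWORD_D.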
1:
ldmia r1, {r4-r7}
add r1, r1, r2
stmia r0, {r4-r7}
pld [r1]
subs r3, r3, #1
add r0, r0, r2
bne 1b
ldmfd sp!, {r4-r11, pc}
.align 8
2:
ldmia r1, {r4-r8}
add r1, r1, r2
ADJ_ALIGN_QUADWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
pld [r1]
subs r3, r3, #1
stmia r0, {r9-r12}
add r0, r0, r2
bne 2b
ldmfd sp!, {r4-r11, pc}
.align 8
3:
ldmia r1, {r4-r8}
add r1, r1, r2
ADJ_ALIGN_QUADWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
pld [r1]
subs r3, r3, #1
stmia r0, {r9-r12}
add r0, r0, r2
bne 3b
ldmfd sp!, {r4-r11, pc}
.align 8
4:
ldmia r1, {r4-r8}
add r1, r1, r2
ADJ_ALIGN_QUADWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
pld [r1]
subs r3, r3, #1
stmia r0, {r9-r12}
add r0, r0, r2
bne 4b
ldmfd sp!, {r4-r11,pc}
.align 8
5:
.word 1b
.word 2b
.word 3b
.word 4b
@ ----------------------------------------------------------------
.align 8
.global put_pixels8_arm
put_pixels8_arm:
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
pld [r1]
stmfd sp!, {r4-r5,lr} @ R14 is also called LR
adr r5, 5f
ands r4, r1, #3
bic r1, r1, #3
add r5, r5, r4, lsl #2
ldrne pc, [r5]
1:
ldmia r1, {r4-r5}
add r1, r1, r2
subs r3, r3, #1
pld [r1]
stmia r0, {r4-r5}
add r0, r0, r2
bne 1b
ldmfd sp!, {r4-r5,pc}
.align 8
2:
ldmia r1, {r4-r5, r12}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r12
pld [r1]
subs r3, r3, #1
stmia r0, {r4-r5}
add r0, r0, r2
bne 2b
ldmfd sp!, {r4-r5,pc}
.align 8
3:
ldmia r1, {r4-r5, r12}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r12
pld [r1]
subs r3, r3, #1
stmia r0, {r4-r5}
add r0, r0, r2
bne 3b
ldmfd sp!, {r4-r5,pc}
.align 8
4:
ldmia r1, {r4-r5, r12}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r12
pld [r1]
subs r3, r3, #1
stmia r0, {r4-r5}
add r0, r0, r2
bne 4b
ldmfd sp!, {r4-r5,pc}
.align 8
5:
.word 1b
.word 2b
.word 3b
.word 4b
@ ----------------------------------------------------------------
.align 8
.global put_pixels8_x2_arm
put_pixels8_x2_arm:
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
pld [r1]
stmfd sp!, {r4-r10,lr} @ R14 is also called LR
adr r5, 5f
ands r4, r1, #3
ldr r12, [r5]
add r5, r5, r4, lsl #2
bic r1, r1, #3
ldrne pc, [r5]
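@ Comment added for clarity, not part of the original patch: here the table
@ at 5: does double duty.  Its first word is the 0xFEFEFEFE mask, loaded into
@ r12 for RND_AVG32; since alignment 0 falls through to 1:, the slot that
@ would otherwise hold ".word 1b" is never used as a branch target, and
@ alignments 1-3 index the remaining entries (2:, 3:, 4:).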
1:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
pld [r1]
RND_AVG32 r8, r9, r4, r5, r6, r7, r12
subs r3, r3, #1
stmia r0, {r8-r9}
add r0, r0, r2
bne 1b
ldmfd sp!, {r4-r10,pc}
.align 8
2:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
pld [r1]
RND_AVG32 r4, r5, r6, r7, r8, r9, r12
subs r3, r3, #1
stmia r0, {r4-r5}
add r0, r0, r2
bne 2b
ldmfd sp!, {r4-r10,pc}
.align 8
3:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
pld [r1]
RND_AVG32 r4, r5, r6, r7, r8, r9, r12
subs r3, r3, #1
stmia r0, {r4-r5}
add r0, r0, r2
bne 3b
ldmfd sp!, {r4-r10,pc}
.align 8
4:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
pld [r1]
RND_AVG32 r8, r9, r6, r7, r5, r10, r12
subs r3, r3, #1
stmia r0, {r8-r9}
add r0, r0, r2
bne 4b
ldmfd sp!, {r4-r10,pc} @@ update PC with LR content.
.align 8
5:
.word 0xFEFEFEFE
.word 2b
.word 3b
.word 4b
.align 8
.global put_no_rnd_pixels8_x2_arm
put_no_rnd_pixels8_x2_arm:
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
pld [r1]
stmfd sp!, {r4-r10,lr} @ R14 is also called LR
adr r5, 5f
ands r4, r1, #3
ldr r12, [r5]
add r5, r5, r4, lsl #2
bic r1, r1, #3
ldrne pc, [r5]
1:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
pld [r1]
NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
subs r3, r3, #1
stmia r0, {r8-r9}
add r0, r0, r2
bne 1b
ldmfd sp!, {r4-r10,pc}
.align 8
2:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
pld [r1]
NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
subs r3, r3, #1
stmia r0, {r4-r5}
add r0, r0, r2
bne 2b
ldmfd sp!, {r4-r10,pc}
.align 8
3:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
pld [r1]
NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
subs r3, r3, #1
stmia r0, {r4-r5}
add r0, r0, r2
bne 3b
ldmfd sp!, {r4-r10,pc}
.align 8
4:
ldmia r1, {r4-r5, r10}
add r1, r1, r2
ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
pld [r1]
NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12