Commit 7d8c17b5 authored by Mans Rullgard

ARM: aacdec: fix constraints on inline asm



This adds output operands for the modified memory, allowing the
volatile qualifiers to be dropped.
Signed-off-by: Mans Rullgard <mans@mansr.com>
parent 84e4804a
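
For background: GCC treats an asm statement with no visible outputs as dead code unless it is qualified `volatile`, and the qualifier in turn blocks scheduling optimizations. Declaring the stored-to memory as `"=m"` output operands gives the compiler precise dependency information instead. A minimal sketch of the pattern on a hypothetical helper (not code from this commit):

```c
/* Before: the 8-byte store through p is invisible to the compiler,
 * so the asm must be volatile (plus a "memory" clobber) to survive. */
static inline void zero2_old(float *p)
{
    __asm__ volatile ("vmov.i32 d0, #0    \n\t"
                      "vst1.32 {d0}, [%0] \n\t"
                      :
                      : "r"(p)
                      : "d0", "memory");
}

/* After: "=m"(p[0]) and "=m"(p[1]) declare the two floats written by
 * vst1.32, so both volatile and the "memory" clobber can be dropped. */
static inline void zero2_new(float *p)
{
    __asm__ ("vmov.i32 d0, #0    \n\t"
             "vst1.32 {d0}, [%2] \n\t"
             : "=m"(p[0]), "=m"(p[1])
             : "r"(p)
             : "d0");
}
```

The hunks below make the same change to the four VMUL helpers, adding one `"=m"(dst[i])` output per float stored.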
@@ -30,17 +30,17 @@ static inline float *VMUL2(float *dst, const float *v, unsigned idx,
                            const float *scale)
 {
     unsigned v0, v1;
-    __asm__ volatile ("ubfx %0, %4, #0, #4 \n\t"
-                      "ubfx %1, %4, #4, #4 \n\t"
-                      "ldr %0, [%3, %0, lsl #2] \n\t"
-                      "ldr %1, [%3, %1, lsl #2] \n\t"
-                      "vld1.32 {d1[]}, [%5,:32] \n\t"
-                      "vmov d0, %0, %1 \n\t"
-                      "vmul.f32 d0, d0, d1 \n\t"
-                      "vst1.32 {d0}, [%2,:64]! \n\t"
-                      : "=&r"(v0), "=&r"(v1), "+r"(dst)
-                      : "r"(v), "r"(idx), "r"(scale)
-                      : "d0", "d1");
+    __asm__ ("ubfx %0, %6, #0, #4 \n\t"
+             "ubfx %1, %6, #4, #4 \n\t"
+             "ldr %0, [%5, %0, lsl #2] \n\t"
+             "ldr %1, [%5, %1, lsl #2] \n\t"
+             "vld1.32 {d1[]}, [%7,:32] \n\t"
+             "vmov d0, %0, %1 \n\t"
+             "vmul.f32 d0, d0, d1 \n\t"
+             "vst1.32 {d0}, [%2,:64]! \n\t"
+             : "=&r"(v0), "=&r"(v1), "+r"(dst), "=m"(dst[0]), "=m"(dst[1])
+             : "r"(v), "r"(idx), "r"(scale)
+             : "d0", "d1");
     return dst;
 }
 
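
As a reading aid, a plain-C sketch of what the VMUL2 asm computes (the helper name and code are illustrative, inferred from the instructions):

```c
static inline float *vmul2_ref(float *dst, const float *v, unsigned idx,
                               const float *scale)
{
    float s = *scale;
    *dst++ = v[ idx       & 15] * s;  /* ubfx: low 4-bit index, then table load */
    *dst++ = v[(idx >> 4) & 15] * s;  /* ubfx: next 4-bit index */
    return dst;                       /* vst1.32 [...]! post-increments dst by 2 */
}
```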
@@ -49,22 +49,23 @@ static inline float *VMUL4(float *dst, const float *v, unsigned idx,
                            const float *scale)
 {
     unsigned v0, v1, v2, v3;
-    __asm__ volatile ("ubfx %0, %6, #0, #2 \n\t"
-                      "ubfx %1, %6, #2, #2 \n\t"
-                      "ldr %0, [%5, %0, lsl #2] \n\t"
-                      "ubfx %2, %6, #4, #2 \n\t"
-                      "ldr %1, [%5, %1, lsl #2] \n\t"
-                      "ubfx %3, %6, #6, #2 \n\t"
-                      "ldr %2, [%5, %2, lsl #2] \n\t"
-                      "vmov d0, %0, %1 \n\t"
-                      "ldr %3, [%5, %3, lsl #2] \n\t"
-                      "vld1.32 {d2[],d3[]},[%7,:32] \n\t"
-                      "vmov d1, %2, %3 \n\t"
-                      "vmul.f32 q0, q0, q1 \n\t"
-                      "vst1.32 {q0}, [%4,:128]! \n\t"
-                      : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst)
-                      : "r"(v), "r"(idx), "r"(scale)
-                      : "d0", "d1", "d2", "d3");
+    __asm__ ("ubfx %0, %10, #0, #2 \n\t"
+             "ubfx %1, %10, #2, #2 \n\t"
+             "ldr %0, [%9, %0, lsl #2] \n\t"
+             "ubfx %2, %10, #4, #2 \n\t"
+             "ldr %1, [%9, %1, lsl #2] \n\t"
+             "ubfx %3, %10, #6, #2 \n\t"
+             "ldr %2, [%9, %2, lsl #2] \n\t"
+             "vmov d0, %0, %1 \n\t"
+             "ldr %3, [%9, %3, lsl #2] \n\t"
+             "vld1.32 {d2[],d3[]},[%11,:32] \n\t"
+             "vmov d1, %2, %3 \n\t"
+             "vmul.f32 q0, q0, q1 \n\t"
+             "vst1.32 {q0}, [%4,:128]! \n\t"
+             : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
+               "=m"(dst[0]), "=m"(dst[1]), "=m"(dst[2]), "=m"(dst[3])
+             : "r"(v), "r"(idx), "r"(scale)
+             : "d0", "d1", "d2", "d3");
     return dst;
 }
 
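
VMUL4 is the same lookup with four 2-bit indices packed into `idx`; an illustrative C sketch:

```c
static inline float *vmul4_ref(float *dst, const float *v, unsigned idx,
                               const float *scale)
{
    float s = *scale;
    *dst++ = v[ idx       & 3] * s;
    *dst++ = v[(idx >> 2) & 3] * s;
    *dst++ = v[(idx >> 4) & 3] * s;
    *dst++ = v[(idx >> 6) & 3] * s;
    return dst;
}
```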
@@ -73,22 +74,23 @@ static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
                             unsigned sign, const float *scale)
 {
     unsigned v0, v1, v2, v3;
-    __asm__ volatile ("ubfx %0, %6, #0, #4 \n\t"
-                      "ubfx %1, %6, #4, #4 \n\t"
-                      "ldr %0, [%5, %0, lsl #2] \n\t"
-                      "lsl %2, %8, #30 \n\t"
-                      "ldr %1, [%5, %1, lsl #2] \n\t"
-                      "lsl %3, %8, #31 \n\t"
-                      "vmov d0, %0, %1 \n\t"
-                      "bic %2, %2, #1<<30 \n\t"
-                      "vld1.32 {d1[]}, [%7,:32] \n\t"
-                      "vmov d2, %2, %3 \n\t"
-                      "veor d0, d0, d2 \n\t"
-                      "vmul.f32 d0, d0, d1 \n\t"
-                      "vst1.32 {d0}, [%4,:64]! \n\t"
-                      : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst)
-                      : "r"(v), "r"(idx), "r"(scale), "r"(sign)
-                      : "d0", "d1", "d2");
+    __asm__ ("ubfx %0, %8, #0, #4 \n\t"
+             "ubfx %1, %8, #4, #4 \n\t"
+             "ldr %0, [%7, %0, lsl #2] \n\t"
+             "lsl %2, %10, #30 \n\t"
+             "ldr %1, [%7, %1, lsl #2] \n\t"
+             "lsl %3, %10, #31 \n\t"
+             "vmov d0, %0, %1 \n\t"
+             "bic %2, %2, #1<<30 \n\t"
+             "vld1.32 {d1[]}, [%9,:32] \n\t"
+             "vmov d2, %2, %3 \n\t"
+             "veor d0, d0, d2 \n\t"
+             "vmul.f32 d0, d0, d1 \n\t"
+             "vst1.32 {d0}, [%4,:64]! \n\t"
+             : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
+               "=m"(dst[0]), "=m"(dst[1])
+             : "r"(v), "r"(idx), "r"(scale), "r"(sign)
+             : "d0", "d1", "d2");
     return dst;
 }
 
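
VMUL2S additionally applies sign bits by XORing the IEEE-754 sign position; flipping the scale's sign, as done below, is equivalent to the asm's veor on the loaded values. An illustrative C sketch:

```c
static inline float *vmul2s_ref(float *dst, const float *v, unsigned idx,
                                unsigned sign, const float *scale)
{
    union { float f; unsigned i; } s0, s1;
    s0.f = s1.f = *scale;
    s0.i ^= (sign >> 1) << 31;  /* bit 1 of sign negates the first product */
    s1.i ^=  sign       << 31;  /* bit 0 of sign negates the second */
    *dst++ = v[ idx       & 15] * s0.f;
    *dst++ = v[(idx >> 4) & 15] * s1.f;
    return dst;
}
```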
@@ -97,38 +99,39 @@ static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
                             unsigned sign, const float *scale)
 {
     unsigned v0, v1, v2, v3, nz;
-    __asm__ volatile ("vld1.32 {d2[],d3[]},[%9,:32] \n\t"
-                      "ubfx %0, %8, #0, #2 \n\t"
-                      "ubfx %1, %8, #2, #2 \n\t"
-                      "ldr %0, [%7, %0, lsl #2] \n\t"
-                      "ubfx %2, %8, #4, #2 \n\t"
-                      "ldr %1, [%7, %1, lsl #2] \n\t"
-                      "ubfx %3, %8, #6, #2 \n\t"
-                      "ldr %2, [%7, %2, lsl #2] \n\t"
-                      "vmov d0, %0, %1 \n\t"
-                      "ldr %3, [%7, %3, lsl #2] \n\t"
-                      "lsr %6, %8, #12 \n\t"
-                      "rbit %6, %6 \n\t"
-                      "vmov d1, %2, %3 \n\t"
-                      "lsls %6, %6, #1 \n\t"
-                      "and %0, %5, #1<<31 \n\t"
-                      "lslcs %5, %5, #1 \n\t"
-                      "lsls %6, %6, #1 \n\t"
-                      "and %1, %5, #1<<31 \n\t"
-                      "lslcs %5, %5, #1 \n\t"
-                      "lsls %6, %6, #1 \n\t"
-                      "and %2, %5, #1<<31 \n\t"
-                      "lslcs %5, %5, #1 \n\t"
-                      "vmov d4, %0, %1 \n\t"
-                      "and %3, %5, #1<<31 \n\t"
-                      "vmov d5, %2, %3 \n\t"
-                      "veor q0, q0, q2 \n\t"
-                      "vmul.f32 q0, q0, q1 \n\t"
-                      "vst1.32 {q0}, [%4,:128]! \n\t"
-                      : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
-                        "+r"(sign), "=r"(nz)
-                      : "r"(v), "r"(idx), "r"(scale)
-                      : "cc", "d0", "d1", "d2", "d3", "d4", "d5");
+    __asm__ ("vld1.32 {d2[],d3[]},[%13,:32] \n\t"
+             "ubfx %0, %12, #0, #2 \n\t"
+             "ubfx %1, %12, #2, #2 \n\t"
+             "ldr %0, [%11,%0, lsl #2] \n\t"
+             "ubfx %2, %12, #4, #2 \n\t"
+             "ldr %1, [%11,%1, lsl #2] \n\t"
+             "ubfx %3, %12, #6, #2 \n\t"
+             "ldr %2, [%11,%2, lsl #2] \n\t"
+             "vmov d0, %0, %1 \n\t"
+             "ldr %3, [%11,%3, lsl #2] \n\t"
+             "lsr %6, %12, #12 \n\t"
+             "rbit %6, %6 \n\t"
+             "vmov d1, %2, %3 \n\t"
+             "lsls %6, %6, #1 \n\t"
+             "and %0, %5, #1<<31 \n\t"
+             "lslcs %5, %5, #1 \n\t"
+             "lsls %6, %6, #1 \n\t"
+             "and %1, %5, #1<<31 \n\t"
+             "lslcs %5, %5, #1 \n\t"
+             "lsls %6, %6, #1 \n\t"
+             "and %2, %5, #1<<31 \n\t"
+             "lslcs %5, %5, #1 \n\t"
+             "vmov d4, %0, %1 \n\t"
+             "and %3, %5, #1<<31 \n\t"
+             "vmov d5, %2, %3 \n\t"
+             "veor q0, q0, q2 \n\t"
+             "vmul.f32 q0, q0, q1 \n\t"
+             "vst1.32 {q0}, [%4,:128]! \n\t"
+             : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
+               "+r"(sign), "=r"(nz),
+               "=m"(dst[0]), "=m"(dst[1]), "=m"(dst[2]), "=m"(dst[3])
+             : "r"(v), "r"(idx), "r"(scale)
+             : "cc", "d0", "d1", "d2", "d3", "d4", "d5");
     return dst;
 }
 
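
VMUL4S is the subtlest helper: bits 12-15 of `idx` flag which of the four codebook values are non-zero, and one bit of `sign` (consumed MSB first) negates each non-zero value; the rbit/lsls/lslcs sequence walks both bit strings in step. An illustrative C sketch of the same logic:

```c
static inline float *vmul4s_ref(float *dst, const float *v, unsigned idx,
                                unsigned sign, const float *scale)
{
    union { float f; unsigned i; } s = { .f = *scale }, t;
    unsigned nz = idx >> 12;             /* 4-bit mask of non-zero values */
    int i;

    for (i = 0; i < 4; i++) {
        t.i = s.i ^ (sign & 1U << 31);   /* MSB of sign negates this product */
        *dst++ = v[(idx >> 2 * i) & 3] * t.f;
        sign <<= nz & 1;                 /* advance sign only on non-zero values */
        nz  >>= 1;
    }
    return dst;
}
```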