Commit 6b9a7b33 authored by Martin Storsjo's avatar Martin Storsjo
Browse files

thumb: Add a parameter for specifying a shift offset for the pc addition conversion

The branch instructions are encoded as 16-bit instructions
by the Microsoft assembler, while they are encoded as 32-bit
instructions by GNU binutils.

Change-Id: I622b9025df3520c08eef8447df078f5517fb4b67
Showing with 7 additions and 4 deletions
...@@ -28,7 +28,7 @@ while (<STDIN>) ...@@ -28,7 +28,7 @@ while (<STDIN>)
s/qsubaddx/qsax/i; s/qsubaddx/qsax/i;
s/qaddsubx/qasx/i; s/qaddsubx/qasx/i;
thumb::FixThumbInstructions($_); thumb::FixThumbInstructions($_, 1);
s/ldrneb/ldrbne/i; s/ldrneb/ldrbne/i;
s/ldrneh/ldrhne/i; s/ldrneh/ldrhne/i;
......
...@@ -183,7 +183,7 @@ while (<STDIN>) ...@@ -183,7 +183,7 @@ while (<STDIN>)
s/(vtbl.\d+\s+[^,]+),([^,]+)/$1,\{$2\}/g; s/(vtbl.\d+\s+[^,]+),([^,]+)/$1,\{$2\}/g;
if ($thumb) { if ($thumb) {
thumb::FixThumbInstructions($_); thumb::FixThumbInstructions($_, 0);
} }
# eabi_attributes numerical equivalents can be found in the # eabi_attributes numerical equivalents can be found in the
......
...@@ -11,8 +11,11 @@ ...@@ -11,8 +11,11 @@
package thumb; package thumb;
sub FixThumbInstructions($) sub FixThumbInstructions($$)
{ {
my $short_branches = $_[1];
my $branch_shift_offset = $short_branches ? 1 : 0;
# Write additions with shifts, such as "add r10, r11, lsl #8", # Write additions with shifts, such as "add r10, r11, lsl #8",
# in three operand form, "add r10, r10, r11, lsl #8". # in three operand form, "add r10, r10, r11, lsl #8".
s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g; s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;
...@@ -56,7 +59,7 @@ sub FixThumbInstructions($) ...@@ -56,7 +59,7 @@ sub FixThumbInstructions($)
# "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12", # "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12",
# "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12". # "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12".
# This assumes that r12 is free at this point. # This assumes that r12 is free at this point.
s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #$4\n$1movlt.n$2pc, r12/g; s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #($4-$branch_shift_offset)\n$1movlt.n$2pc, r12/g;
# Convert "mov pc, lr" into "bx lr", since the former only works # Convert "mov pc, lr" into "bx lr", since the former only works
# for switching from arm to thumb (and only in armv7), but not # for switching from arm to thumb (and only in armv7), but not
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment