diff --git a/build/make/Makefile b/build/make/Makefile
index 4ac5bcf1fb8e66449420516658d8a01d7be51aaf..e52ecd8c8082e35ad8f356063fb9800a3715e638 100644
--- a/build/make/Makefile
+++ b/build/make/Makefile
@@ -381,6 +381,7 @@ ifneq ($(call enabled,DIST-SRCS),)
     DIST-SRCS-$(ARCH_ARM)$(ARCH_X86)$(ARCH_X86_64)    += build/make/obj_int_extract.c
     DIST-SRCS-$(ARCH_ARM)    += build/make/ads2gas.pl
     DIST-SRCS-$(ARCH_ARM)    += build/make/ads2gas_apple.pl
+    DIST-SRCS-$(ARCH_ARM)    += build/make/thumb.pm
     DIST-SRCS-yes            += $(target:-$(TOOLCHAIN)=).mk
 endif
 INSTALL-SRCS := $(call cond_enabled,CONFIG_INSTALL_SRCS,INSTALL-SRCS)
diff --git a/build/make/ads2gas.pl b/build/make/ads2gas.pl
index 69522bfd86e8911f24f40fb5a62f59f00c73d79c..c84941e99475ab108a397ea6edbd7a03bd6e32f1 100755
--- a/build/make/ads2gas.pl
+++ b/build/make/ads2gas.pl
@@ -18,6 +18,10 @@
 # Usage: cat inputfile | perl ads2gas.pl > outputfile
 #
 
+use FindBin;
+use lib $FindBin::Bin;
+use thumb;
+
 my $thumb = 0;
 
 foreach my $arg (@ARGV) {
@@ -179,54 +183,7 @@ while (<STDIN>)
     s/(vtbl.\d+\s+[^,]+),([^,]+)/$1,\{$2\}/g;
 
     if ($thumb) {
-        # Write additions with shifts, such as "add r10, r11, lsl #8",
-        # in three operand form, "add r10, r10, r11, lsl #8".
-        s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;
-
-        # Convert additions with a non-constant shift into a sequence
-        # with left shift, addition and a right shift (to restore the
-        # register to the original value). Currently the right shift
-        # isn't necessary in the code base since the values in these
-        # registers aren't used, but doing the shift for consitency.
-        # This converts instructions such as "add r12, r12, r5, lsl r4"
-        # into the sequence "lsl r5, r4", "add r12, r12, r5", "lsr r5, r4".
-        s/^(\s*)(add)(\s+)(r\d+),\s*(r\d+),\s*(r\d+),\s*lsl (r\d+)/$1lsl$3$6, $7\n$1$2$3$4, $5, $6\n$1lsr$3$6, $7/g;
-
-        # Convert loads with right shifts in the indexing into a
-        # sequence of an add, load and sub. This converts
-        # "ldrb r4, [r9, lr, asr #1]" into "add r9, r9, lr, asr #1",
-        # "ldrb r9, [r9]", "sub r9, r9, lr, asr #1".
-        s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+),\s*(asr #\d+)\]/$1add $3$5, $5, $6, $7\n$1$2$3$4, [$5]\n$1sub $3$5, $5, $6, $7/g;
-
-        # Convert register indexing with writeback into a separate add
-        # instruction. This converts "ldrb r12, [r1, r2]!" into
-        # "ldrb r12, [r1, r2]", "add r1, r1, r2".
-        s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+)\]!/$1$2$3$4, [$5, $6]\n$1add $3$5, $6/g;
-
-        # Convert negative register indexing into separate sub/add instructions.
-        # This converts "ldrne r4, [src, -pstep, lsl #1]" into
-        # "subne src, src, pstep, lsl #1", "ldrne r4, [src]",
-        # "addne src, src, pstep, lsl #1". In a couple of cases where
-        # this is used, it's used for two subsequent load instructions,
-        # where a hand-written version of it could merge two subsequent
-        # add and sub instructions.
-        s/^(\s*)((ldr|str)(ne)?)(\s+)(r\d+),\s*\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6, [$7]\n$1add$4$5$7, $7, $8/g;
-
-        # Convert register post indexing to a separate add instruction.
-        # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
-        # "add r0, r2".
-        s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;
-
-        # Convert a conditional addition to the pc register into a series of
-        # instructions. This converts "addlt pc, pc, r3, lsl #2" into
-        # "ittt lt", "addlt.w r12, pc, #10", "addlt.w r12, r12, r3, lsl #2",
-        # "movlt.n pc, r12". This assumes that r12 is free at this point.
-        s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1ittt$2lt\n$1addlt.w$2r12, pc, #10\n$1addlt.w$2r12, r12, $3, lsl #$4\n$1movlt.n$2pc, r12/g;
-
-        # Convert "mov pc, lr" into "bx lr", since the former only works
-        # for switching from arm to thumb (and only in armv7), but not
-        # from thumb to arm.
-        s/mov(\s*)pc\s*,\s*lr/bx$1lr/g;
+        thumb::FixThumbInstructions($_);
     }
 
     # eabi_attributes numerical equivalents can be found in the
diff --git a/build/make/thumb.pm b/build/make/thumb.pm
new file mode 100644
index 0000000000000000000000000000000000000000..51308a3a721c1a9bb5c66e19ed50dad8b4804b1c
--- /dev/null
+++ b/build/make/thumb.pm
@@ -0,0 +1,66 @@
+#!/usr/bin/perl
+##
+##  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+
+package thumb;
+
+sub FixThumbInstructions($)
+{
+    # Write additions with shifts, such as "add r10, r11, lsl #8",
+    # in three operand form, "add r10, r10, r11, lsl #8".
+    s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;
+
+    # Convert additions with a non-constant shift into a sequence
+    # with left shift, addition and a right shift (to restore the
+    # register to the original value). Currently the right shift
+    # isn't necessary in the code base since the values in these
+    # registers aren't used, but doing the shift for consistency.
+    # This converts instructions such as "add r12, r12, r5, lsl r4"
+    # into the sequence "lsl r5, r4", "add r12, r12, r5", "lsr r5, r4".
+    s/^(\s*)(add)(\s+)(r\d+),\s*(r\d+),\s*(r\d+),\s*lsl (r\d+)/$1lsl$3$6, $7\n$1$2$3$4, $5, $6\n$1lsr$3$6, $7/g;
+
+    # Convert loads with right shifts in the indexing into a
+    # sequence of an add, load and sub. This converts
+    # "ldrb r4, [r9, lr, asr #1]" into "add r9, r9, lr, asr #1",
+    # "ldrb r9, [r9]", "sub r9, r9, lr, asr #1".
+    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+),\s*(asr #\d+)\]/$1add $3$5, $5, $6, $7\n$1$2$3$4, [$5]\n$1sub $3$5, $5, $6, $7/g;
+
+    # Convert register indexing with writeback into a separate add
+    # instruction. This converts "ldrb r12, [r1, r2]!" into
+    # "ldrb r12, [r1, r2]", "add r1, r1, r2".
+    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+)\]!/$1$2$3$4, [$5, $6]\n$1add $3$5, $6/g;
+
+    # Convert negative register indexing into separate sub/add instructions.
+    # This converts "ldrne r4, [src, -pstep, lsl #1]" into
+    # "subne src, src, pstep, lsl #1", "ldrne r4, [src]",
+    # "addne src, src, pstep, lsl #1". In a couple of cases where
+    # this is used, it's used for two subsequent load instructions,
+    # where a hand-written version of it could merge two subsequent
+    # add and sub instructions.
+    s/^(\s*)((ldr|str)(ne)?)(\s+)(r\d+),\s*\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6, [$7]\n$1add$4$5$7, $7, $8/g;
+
+    # Convert register post indexing to a separate add instruction.
+    # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
+    # "add r0, r2".
+    s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;
+
+    # Convert a conditional addition to the pc register into a series of
+    # instructions. This converts "addlt pc, pc, r3, lsl #2" into
+    # "ittt lt", "addlt.w r12, pc, #10", "addlt.w r12, r12, r3, lsl #2",
+    # "movlt.n pc, r12". This assumes that r12 is free at this point.
+    s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1ittt$2lt\n$1addlt.w$2r12, pc, #10\n$1addlt.w$2r12, r12, $3, lsl #$4\n$1movlt.n$2pc, r12/g;
+
+    # Convert "mov pc, lr" into "bx lr", since the former only works
+    # for switching from arm to thumb (and only in armv7), but not
+    # from thumb to arm.
+    s/mov(\s*)pc\s*,\s*lr/bx$1lr/g;
+}
+
+1;