Refactor ARM Blur prefill logic.

Refactor the prefill logic for ARM (and improve documentation along the way) so
as to fix some cases where data is read outside of the source image, and to
minimise the remaining cases which must fall back to the C implementation.
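
For reference, the start-up bookkeeping implemented by the new prefill
macro is roughly the following.  This is an illustrative C sketch only
(not part of the patch); the names mirror the registers and the
comments in the assembler sources:

    #include <stdint.h>

    /* Hypothetical helper mirroring the assembler's prefill set-up. */
    static void prefill_bounds(uint32_t max_r, uint32_t step,
                               uint32_t x8,  /* data left of src */
                               uint32_t x4,  /* data right of src */
                               uint32_t *x10, uint32_t *x11) {
        /* Window size in bytes, rounded up to a whole 16-byte chunk. */
        uint32_t windowsize = ((max_r + max_r) * step + 15) & ~15u;
        /* Window index of the byte under the source pointer. */
        uint32_t centertap = windowsize - max_r * step;
        /* Where legal image data begins and ends within the window. */
        *x10 = x8 < centertap ? centertap - x8 : 0;
        *x11 = x4 < windowsize - centertap ? x4 + centertap : windowsize;
    }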

Change-Id: I3d06416b40c48dea06258e9f7bb5ddc246d7c710
diff --git a/cpu_ref/rsCpuIntrinsicBlur.cpp b/cpu_ref/rsCpuIntrinsicBlur.cpp
index cac10d8..9d51e68 100644
--- a/cpu_ref/rsCpuIntrinsicBlur.cpp
+++ b/cpu_ref/rsCpuIntrinsicBlur.cpp
@@ -297,7 +297,7 @@
     uint32_t x2 = xend;
 
 #if defined(ARCH_ARM_USE_INTRINSICS)
-    if (gArchUseSIMD) {
+    if (gArchUseSIMD && info->dim.x >= 4) {
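+      // (For uchar4, 4 pixels is 16 bytes: the minimum width for which the
+      // assembler's fetch helpers never read outside the source image.)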
       rsdIntrinsicBlurU4_K(out, (uchar4 const *)(pin + stride * info->current.y),
                  info->dim.x, info->dim.y,
                  stride, x1, info->current.y, x2 - x1, cp->mIradius, cp->mIp + cp->mIradius);
@@ -368,10 +368,15 @@
     uint32_t x2 = xend;
 
 #if defined(ARCH_ARM_USE_INTRINSICS)
-    if (gArchUseSIMD) {
-        rsdIntrinsicBlurU1_K(out, pin + stride * info->current.y, info->dim.x, info->dim.y,
-                 stride, x1, info->current.y, x2 - x1, cp->mIradius, cp->mIp + cp->mIradius);
-        return;
+    if (gArchUseSIMD && info->dim.x >= 16) {
+        // The specialisation for r<=8 has an awkward prefill case where
+        // starting close to the right edge can cause a read beyond the end of
+        // input, and this is fiddly to resolve.  So avoid that case here.
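+        // For example (hypothetical numbers): dim.x = 20 with x1 = 18 gives
+        // dim.x - max(0, x1 - 8) = 10, which is less than 16, so when r <= 8
+        // the work is left to the C implementation.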
+        if (cp->mIradius > 8 || (info->dim.x - rsMax(0, (int32_t)x1 - 8)) >= 16) {
+            rsdIntrinsicBlurU1_K(out, pin + stride * info->current.y, info->dim.x, info->dim.y,
+                     stride, x1, info->current.y, x2 - x1, cp->mIradius, cp->mIp + cp->mIradius);
+            return;
+        }
     }
 #endif
 
diff --git a/cpu_ref/rsCpuIntrinsics_advsimd_Blur.S b/cpu_ref/rsCpuIntrinsics_advsimd_Blur.S
index f73290f..109fd68 100644
--- a/cpu_ref/rsCpuIntrinsics_advsimd_Blur.S
+++ b/cpu_ref/rsCpuIntrinsics_advsimd_Blur.S
@@ -20,6 +20,10 @@
 
 //#define ARCH_ARM64_USE_BLUR_PRELOAD
 
+/* Number of fractional bits to preserve in intermediate results.  The
+ * intermediate storage is 16-bit, and we started with 8-bit data (the integer
+ * part), so this should be between 0 and 8.
+ */
 .set FRACTION_BITS, 7
 .set MAX_R, 25
 
@@ -60,8 +64,8 @@
  *      x1 -- src
  *      x2 -- pitch
  *      x5 -- r
- *      x6 -- rup
- *      x7 -- rdn
+ *      x6 -- rup (r, unless clipped to top of source image)
+ *      x7 -- rdn (r, unless clipped to bottom of source image)
  *      x12 -- switch index
  *      v0-v3 -- coefficient table
  *      x13 = -pitch
@@ -1155,15 +1159,126 @@
             ret
 END(fetch_generic_asm)
 
-/* Given values in v10 and v11, and an index in x11, sweep the (x11&15)th value
- * across to fill the rest of the register pair.  Used for filling the right
- * hand edge of the window when starting too close to the right hand edge of
- * the image.
+
+/* Fetch the next (16 - (x10 & 15)) columns of data, avoiding reading memory
+ * beyond that limit, and filling the rest of the vector with the last legal
+ * pixel.
+ * Result is in v10 and v11.  v8 and v9 are filled with the first legal pixel.
+ * Note: This function can read beyond the right edge of input if the image is
+ * narrower than 16 bytes.
+ */
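+/* An illustrative example of the shuffle for the step=1 variant: if x10 = 39
+ * on entry then x12 = 7, so the pointers and x10 are wound back by 7 columns
+ * (x10 becomes 32), and v10/v11 are rearranged via the stack so that their
+ * first 7 lanes hold copies of the first legal pixel, followed by the first
+ * 9 of the 16 pixels just fetched.
+ */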
+PRIVATE(fetch_clampleft1)
+            stp         x29, x30, [sp, #-16]!
+            bl          fetch_generic_asm
+            dup         v8.8h, v10.h[0]
+            dup         v9.8h, v10.h[0]
+            ands        x12, x10, #15
+            beq         1f
+            sub         x1, x1, x12
+            sub         x15, x15, x12
+            sub         x19, x19, x12
+            sub         x10, x10, x12
+            sub         x12, sp, x12, LSL #1
+            sub         sp, sp, #64
+            sub         x12, x12, #32
+            st1         {v8.8h, v9.8h, v10.8h,v11.8h}, [sp]
+            ld1         {v10.8h,v11.8h}, [x12]
+            add         sp, sp, #64
+1:          ldp         x29, x30, [sp], #16
+            ret
+END(fetch_clampleft1)
+
+PRIVATE(fetch_clampleft4)
+            stp         x29, x30, [sp, #-16]!
+            bl          fetch_generic_asm
+            dup         v8.2d, v10.d[0]
+            dup         v9.2d, v10.d[0]
+            ands        x12, x10, #15
+            beq         1f
+            sub         x1, x1, x12
+            sub         x15, x15, x12
+            sub         x19, x19, x12
+            sub         x10, x10, x12
+            sub         x12, sp, x12, LSL #1
+            sub         sp, sp, #64
+            sub         x12, x12, #32
+            st1         {v8.8h, v9.8h, v10.8h,v11.8h}, [sp]
+            ld1         {v10.8h,v11.8h}, [x12]
+            add         sp, sp, #64
+1:          ldp         x29, x30, [sp], #16
+            ret
+END(fetch_clampleft4)
+
+/* Fetch only the next (x11 & 15) (where 0 means 16) columns of data, avoiding
+ * reading memory beyond that limit, and filling the rest of the vector with
+ * the last legal pixel.
+ * Result is in v10 and v11.  v12 and v13 are filled with the last legal pixel.
+ * Note: This function can read beyond the left edge of input if the image is
+ * narrower than 16 bytes.
+ */
+PRIVATE(fetch_clampright1)
+            stp         x29, x30, [sp, #-16]!
+            sub         x12, xzr, x11
+            ands        x12, x12, #15
+            beq         1f
+            sub         x1, x1, x12
+            sub         x15, x15, x12
+            sub         x19, x19, x12
+            bl          fetch_generic_asm
+            dup         v12.8h, v11.h[7]
+            dup         v13.8h, v11.h[7]
+            sub         x12, xzr, x11
+            and         x12, x12, #15
+            sub         sp, sp, #64
+            add         x12, sp, x12, LSL #1
+            st1         {v10.8h,v11.8h,v12.8h,v13.8h}, [sp]
+            ld1         {v10.8h,v11.8h}, [x12]
+            add         sp, sp, #64
+            ldp         x29, x30, [sp], #16
+            ret
+1:          bl          fetch_generic_asm
+            dup         v12.8h, v11.h[7]
+            dup         v13.8h, v11.h[7]
+            ldp         x29, x30, [sp], #16
+            ret
+END(fetch_clampright1)
+
+PRIVATE(fetch_clampright4)
+            stp         x29, x30, [sp, #-16]!
+            sub         x12, xzr, x11
+            ands        x12, x12, #15
+            beq         1f
+            sub         x1, x1, x12
+            sub         x15, x15, x12
+            sub         x19, x19, x12
+            bl          fetch_generic_asm
+            dup         v12.2d, v11.d[1]
+            dup         v13.2d, v11.d[1]
+            sub         x12, xzr, x11
+            and         x12, x12, #15
+            sub         sp, sp, #64
+            add         x12, sp, x12, LSL #1
+            st1         {v10.8h,v11.8h,v12.8h,v13.8h}, [sp]
+            ld1         {v10.8h,v11.8h}, [x12]
+            add         sp, sp, #64
+            ldp         x29, x30, [sp], #16
+            ret
+1:          bl          fetch_generic_asm
+            dup         v12.2d, v11.d[1]
+            dup         v13.2d, v11.d[1]
+            ldp         x29, x30, [sp], #16
+            ret
+END(fetch_clampright4)
+
+/* Given values in v10 and v11, and an index in x11, sweep the (x11 & 15)th
+ * value across to fill the rest of the register pair.  Used for filling the
+ * right hand edge of the window when reading too close to the right hand edge
+ * of the image.
  * Also returns a dup-ed copy of the last element in v12 for the tail-fill
  * case (this happens incidentally in common path, but must be done
  * deliberately in the fast-out path).
  */
-PRIVATE(prefetch_clampright1)
+PRIVATE(prefill_sweepright1)
             ands        x12, x11, #15
             beq         1f
             sub         x12, x12, #1
@@ -1171,16 +1286,17 @@
             st1         {v10.8h,v11.8h}, [sp]
             add         x12, sp, x12, LSL #1
             ld1r        {v12.8h}, [x12]
-            st1         {v12.8h}, [x12], #16
-            st1         {v12.8h}, [x12]
+            ld1r        {v13.8h}, [x12]
+            st1         {v12.8h,v13.8h}, [x12]
             ld1         {v10.8h,v11.8h}, [sp]
             add         sp, sp, #64
             ret
 1:          dup         v12.8h, v11.h[7]
+            dup         v13.8h, v11.h[7]
             ret
-END(prefetch_clampright1)
+END(prefill_sweepright1)
 
-PRIVATE(prefetch_clampright4)
+PRIVATE(prefill_sweepright4)
             ands        x12, x11, #15
             beq         1f
             sub         x12, x12, #4
@@ -1188,159 +1304,259 @@
             st1         {v10.8h,v11.8h}, [sp]
             add         x12, sp, x12, LSL #1
             ld1r        {v12.2d}, [x12]
-            st1         {v12.8h}, [x12], #16
-            st1         {v12.8h}, [x12]
+            ld1r        {v13.2d}, [x12]
+            st1         {v12.8h,v13.8h}, [x12]
             ld1         {v10.8h,v11.8h}, [sp]
             add         sp, sp, #64
             ret
 1:          dup         v12.2d, v11.d[1]
+            dup         v13.2d, v11.d[1]
             ret
-END(prefetch_clampright4)
+END(prefill_sweepright4)
 
-
-/* Helpers for prefetch, below.
+/* The main loop keeps a sliding window of data that has already been convolved
+ * in the vertical axis for the current line.  This usually stays in the
+ * register file, but spills to memory for large windows.  The first thing that
+ * needs to be done at start-up is to fill this window with image data, taking
+ * into account the padding needed if the left or right edges of the image fall
+ * within this window.
  */
-.macro prefetch_out qa, qb, store, qsa, qsb, qsb_hi
-  .if \store == 2
-    .ifc \qsa,\qsb
-            st1         {\qsa}, [x9], #16
-            st1         {\qsb}, [x9], #16
+
+/* Because the window is in the register file, writes to it cannot be indexed
+ * by another register.  Consequently the fill loops are unrolled to address
+ * the registers directly.  This macro distinguishes between writes to the
+ * register file and writes to the spill buffer (indicated by a destination
+ * register named xx).
+ */
+.macro prefill_out ra, rb, sra, srb
+  .ifc \ra,xx
+    .ifc \rb,xx
+            st1         {\sra,\srb}, [x9], #32
     .else
-            st1         {\qsa,\qsb}, [x9], #32
-    .endif
-  .elseif \store == 1
             bic         x9, x9, #0x40
-            st1         {\qsa}, [x9], #16
-            mov         \qb, \qsb
-  .elseif \store == 0
-            mov         \qa, \qsa
-            mov         \qb, \qsb
+            st1         {\sra}, [x9], #16
+            mov         \rb, \srb
+    .endif
+  .else
+    .ifnc \ra,\sra
+            mov         \ra, \sra
+    .endif
+    .ifnc \rb,\srb
+            mov         \rb, \srb
+    .endif
   .endif
 .endm
 
-.macro prefetch_one  qa, qb, rem, c, store=0, step=1
-.set i, (need - 16) - \rem
-.if i >= 0
-1:          cmp         x10, #i+16
-            blo         2f
-            prefetch_out \qa, \qb, \store, v9.16b, v9.16b, v9.d[1]
-            b           1f
-2:          cmp         x11, #i+16
-            bls         3f
-            prefetch_out \qa, \qb, \store, v10.16b, v11.16b, v11.d[1]
-            bl          fetch_generic_asm
-            b           2f
-3:          bl          prefetch_clampright\step
-            prefetch_out \qa, \qb, \store, v10.16b, v11.16b, v11.d[1]
-4:          b           4f+4
-           //v12 contains pad word from prefetch_clampright call
-            prefetch_out \qa, \qb, \store, v12.16b, v12.16b, v12.d[1]
-  .if \rem > 0
-            b           4f+4
-  .else
-1:
-2:
-3:
-4:          nop
-  .endif
-.endif
+/* This macro provides the list of registers representing the window, and the
+ * cases where the register file is too small and a spill buffer is used
+ * instead.
+ * Since several specialisations of each function are generated, this also
+ * culls superfluous iterations, and sets the variable `i` for subsequent
+ * macros indicating the current index into the window.
+ */
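+/* For illustration: with label=.Lcnv1_25, step=1 and max_r=25, windowsize is
+ * 64, so only lines 4..1 survive the cull.  Line 4 of the leftfill stage then
+ * expands to the label .Lcnv1_25leftfill4: and invokes prefill_leftfill with
+ * \next=.Lcnv1_25leftedge4 (same window position, next stage) and
+ * \after=.Lcnv1_25leftedge3 (next window position, next stage).
+ */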
+.macro prefill_list, macro, nextmacro, max_r, step, label
+  .macro ifneeded macro, nextmacro, line, nextline, ra, rb, step, label
+    .if windowsize >= (\line * 16)
+      .set i, windowsize - (\line * 16)
+\label\macro\line:
+            prefill_\macro \label\nextmacro\line, \label\nextmacro\nextline, \ra, \rb, \step
+    .endif
+  .endm
+            ifneeded \macro \nextmacro, 13, 12, xx,      xx,      \step, \label
+            ifneeded \macro \nextmacro, 12, 11, xx,      xx,      \step, \label
+            ifneeded \macro \nextmacro, 11, 10, xx,      v17.16b, \step, \label
+            ifneeded \macro \nextmacro, 10,  9, v18.16b, v19.16b, \step, \label
+            ifneeded \macro \nextmacro,  9,  8, v20.16b, v21.16b, \step, \label
+            ifneeded \macro \nextmacro,  8,  7, v22.16b, v23.16b, \step, \label
+            ifneeded \macro \nextmacro,  7,  6, v24.16b, v25.16b, \step, \label
+            ifneeded \macro \nextmacro,  6,  5, v26.16b, v27.16b, \step, \label
+            ifneeded \macro \nextmacro,  5,  4, v28.16b, v29.16b, \step, \label
+            ifneeded \macro \nextmacro,  4,  3, v30.16b, v31.16b, \step, \label
+            ifneeded \macro \nextmacro,  3,  2, v4.16b,  v5.16b,  \step, \label
+            ifneeded \macro \nextmacro,  2,  1, v6.16b,  v7.16b,  \step, \label
+            ifneeded \macro \nextmacro,  1,  0, v8.16b,  v9.16b,  \step, \label
+\label\macro\()0:
+            b           \label\()_end
+  .purgem ifneeded
 .endm
 
+/* These macros represent the possible stages of filling the window.
+ * Each macro is unrolled enough times that it can fill the entire window
+ * itself, but normally it will have to hand control to subsequent macros
+ * part-way through, and this is done using labels named \next and \after, where
+ * \next is the next macro starting at the same window position and \after is
+ * the next macro starting after the current window position.
+ */
+
+/* leftfill: v8 and v9 contain the left padding value.  While the window
+ * extends outside of the image on the left-hand side, and at least 16 more
+ * padding values are needed in the window, store v8 and v9 into the window.
+ * Otherwise skip forward to storing image data.
+ */
+.macro prefill_leftfill, next, after, ra, rb, step
+            cmp         x10, #i+16
+            blo         \next
+            prefill_out \ra, \rb, v8.16b, v9.16b
+.endm
+
+/* leftedge: The very first non-fill or partial-fill chunk from the image is
+ * already loaded (as it was used to calculate the left padding value), so
+ * store it here, and then drop into the regular load/store cycle in the next
+ * macro.
+ */
+.macro prefill_leftedge, next, after, ra, rb, step
+1:          prefill_out \ra, \rb, v10.16b, v11.16b
+            b           \after
+.endm
+
+/* dofetch: Copy chunks of the image into the window without any complications
+ * from edge conditions.
+ */
+.macro prefill_dofetch, next, after, ra, rb, step
+            cmp         x11, #i+16
+            bls         \next
+            bl          fetch_generic_asm
+            prefill_out \ra, \rb, v10.16b, v11.16b
+.endm
+
+/* rightedge: The last fetch (currently in v10 and v11) may have gone beyond
+ * the right-hand edge of the image.  In that case sweep the last valid pixel
+ * across the rest of the chunk, and in either case prepare padding data in v12
+ * and v13 for the next macro.  This is done in fetch_clampright.
+ * This only happens once before going on to the next macro.
+ * Sometimes leftedge also covers the rightedge case, in which case this has
+ * to be skipped altogether.
+ */
+.macro prefill_rightedge, next, after, ra, rb, step
+            cmp         x11, #i
+            bls         \next
+            bl          fetch_clampright\step
+            prefill_out \ra, \rb, v10.16b, v11.16b
+            b           \after
+.endm
+
+/* rightfill: The rest of the window is simply filled with right padding from
+ * v12 and v13.
+ */
+.macro prefill_rightfill, next, after, ra, rb, step
+            prefill_out \ra, \rb, v12.16b, v13.16b
+.endm
+
+/* Here all of the macros above are unrolled and laid out in the proper order.
+ */
+.macro prefill_body, max_r, step, label
+            prefill_list leftfill,  leftedge,   \max_r, \step, \label
+            prefill_list leftedge,  dofetch,    \max_r, \step, \label
+            prefill_list dofetch,   rightedge,  \max_r, \step, \label
+            prefill_list rightedge, rightfill,  \max_r, \step, \label
+            prefill_list rightfill, oops,       \max_r, \step, \label
+\label\()_end:
+.endm
+
+
 /* Fill the convolution window with context data.  The aim here is to load
- * exactly rlf + rrt columns, and in the main loop to read as many columns as
- * will be written.  This is complicated by the need to handle cases when the
- * input starts very close to the left or right (or both) edges of the image,
- * and where these do not fall on 16-byte boundaries.
+ * exactly 2*r columns, and in the main loop to read as many columns as will be
+ * written.  This is complicated by the window being divided into chunks at
+ * register boundaries, and the need to handle cases when the input starts very
+ * close to the left or right (or both) edges of the image, and by the need to
+ * fill the gaps that this leaves with left and right edge padding values.
  *
  * Input:
  *      x1 -- src
  *      x2 -- pitch
  *      x3 -- count
- *      x4 -- inlen
+ *      x4 -- available image data right of src pointer
  *      x5 -- r
  *      x6 -- rup
  *      x7 -- rdn
- *      x8 -- rlf
+ *      x8 -- available image data left of src pointer
  *      x9 -- buffer (if needed)
  *      x13 = -pitch
  *      x15 = top-row in
  *      x19 = bottom-row in
  * Output:
- *      x1 += rlf + min(count, rrt)
+ *      x4 -= min(inlen, count + windowsize - centertap)
+ *      x1 += min(inlen, count + windowsize - centertap)
+ *      x15 += min(inlen, count + windowsize - centertap)
+ *      x19 += min(inlen, count + windowsize - centertap)
  * Modifies:
  *      x10 -- fill start index in the window
  *      x11 -- fill stop index in the window
  *      x12 -- scratch
  */
-.macro prefetch step=1, max_r=25
-.set need, ((\max_r + \max_r) * \step + 15) & ~15
-  .if \step == 1
-            mov         x10, #need - (\max_r * \step)
-            sub         x10, x10, x8
-  .else
-            mov         x10, #need - (\max_r * \step)
-            sub         x10, x10, x8, LSL #2
-  .endif
-            add         x11, x10, x4
-            subs        x11, x11, #need
-            csel        x11, xzr, x11, hi
-            add         x11, x11, #need
+.macro prefill step=1, max_r=25, label=xx
+.set windowsize, (((\max_r + \max_r) * \step + 15) & ~15)
+.set centertap, (windowsize - \max_r * \step)
+            mov         x10, #centertap
+            subs        x10, x10, x8
+            csel        x10, xzr, x10, lo
 
-            bl          fetch_generic_asm
-  .if \step == 1
-            dup         v9.8h, v10.h[0]
-  .else
-            dup         v9.2d, v10.d[0]
-  .endif
-            ands        x12, x10, #15
-            beq         2f
-            sub         sp, sp, #32
-            st1         {v10.8h,v11.8h}, [sp]
-            sub         x12, sp, x12, LSL #1
-            sub         sp, sp, #16
-            st1         {v9.8h}, [sp]
-            sub         sp, sp, #16
-            st1         {v9.8h}, [sp]
-            ld1         {v10.8h,v11.8h}, [x12]
-            add         sp, sp, #64
-            sub         x1, x1, x10
-            sub         x15, x15, x10
-            sub         x19, x19, x10
-            bic         x10, x10, #15
+            subs        x11, x4, #windowsize - centertap
+            csel        x11, xzr, x11, hs
+            add         x11, x11, #windowsize
+
+            /* x10 indicates where in the window legal image data begins.
+             * x11 indicates where in the window legal image data ends.
+             * When starting near the centre of a large image these would be
+             * zero and windowsize respectively, but when starting near the
+             * edges this can change.
+             * When starting on the leftmost pixel, x10 will be centertap.
+             * When starting on the rightmost pixel, x11 will be centertap+1.
+             */
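+            /* A worked example (illustrative): for step=1 and max_r=25,
+             * windowsize=64 and centertap=39.  Starting 3 pixels from the
+             * left edge of a wide image (x8=3, large x4) gives x10=36 and
+             * x11=64; starting 10 pixels from the right edge (large x8,
+             * x4=10) gives x10=0 and x11=49.
+             */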
+
+            /* x4 indicates how much data there is between the current pointers
+             * and the right edge of the image.  The pointers currently point
+             * to the data needed at centertap.  The subsequent code will
+             * consume (windowsize - x10) data, but only the data from
+             * centertap to windowsize comes out of x4's budget.
+             */
+1:          subs        x4, x4, #windowsize - centertap
+            csel        x4, xzr, x4, lo
+
+            /* And the pointers need to rewind to the start of the window.
+             */
+            sub         x1, x1, #centertap
+            sub         x15, x15, #centertap
+            sub         x19, x19, #centertap
+
+            /* Unless x8 indicated that there wasn't that much data available.
+             */
             add         x1, x1, x10
             add         x15, x15, x10
             add         x19, x19, x10
-2:
-  .if \step > 1
-            /* it's only in the uchar2 and uchar4 cases where the register file
-             * is insufficient (given MAX_R <= 25).
-             */
-            prefetch_one xx, xx, 192, c=\max_r, step=\step, store=2
-            prefetch_one xx, xx, 176, c=\max_r, step=\step, store=2
-            prefetch_one xx,      v17.16b, 160, c=\max_r, step=\step, store=1
-            prefetch_one v18.16b, v19.16b, 144, c=\max_r, step=\step, store=0
-            prefetch_one v20.16b, v21.16b, 128, c=\max_r, step=\step, store=0
-            prefetch_one v22.16b, v23.16b, 112, c=\max_r, step=\step, store=0
-            prefetch_one v24.16b, v25.16b,  96, c=\max_r, step=\step, store=0
-            prefetch_one v26.16b, v27.16b,  80, c=\max_r, step=\step, store=0
-            prefetch_one v28.16b, v29.16b,  64, c=\max_r, step=\step, store=0
-  .endif
-            prefetch_one v30.16b, v31.16b,  48, c=\max_r, step=\step, store=0
-            prefetch_one v4.16b,  v5.16b,   32, c=\max_r, step=\step, store=0
-            prefetch_one v6.16b,  v7.16b,   16, c=\max_r, step=\step, store=0
-            prefetch_one v8.16b,  v9.16b,    0, c=\max_r, step=\step, store=0
 
-  .if \step == 1
-            add         x10, x8, #\max_r * \step
-  .else
-            lsl         x10, x8, #2
-            add         x10, x10, #\max_r * \step
-  .endif
-            subs        x4, x4, x10
-            csel        x4, xzr, x4, lo
+            /* Get the first chunk, and add padding to align it to the window
+             * if necessary.
+             */
+            bl          fetch_clampleft\step
+
+            /* Sometimes the start and the end of the window are in the same
+             * chunk.  In that case both ends need filler at the outset.
+             */
+            sub         x12, x11, #1
+            eor         x12,  x10, x12
+            cmp         x12, #16
+            bhs         1f
+            bl          prefill_sweepright\step
+
+            /* Iterate through all the points in the window and fill them in
+             * with padding or image data as needed.
+             */
+1:          prefill_body \max_r, \step, \label
 .endm
 
-/* The main loop.
+/* The main body of the convolve functions.  Having already pre-filled the
+ * convolution window with 2*r input values, the logic settles into a regular
+ * pattern of reading and writing at a 1:1 rate until either input or output
+ * expires.  The input leads the output by r values, so when processing all the
+ * way to the right-hand edge, or within r pixels of that edge, the input will
+ * run out first.  In the case of very narrow images, or sub-windows starting
+ * near the right edge, the input may already have run out while the
+ * convolution window was being filled and this loop will start with a
+ * zero-length input.
+ *
+ * Once the input runs out, the remaining output must be produced by padding
+ * the rest of the window with the value of the last valid pixel from the
+ * source.
  *
  * Input:
  *      x0 = dst
@@ -1358,7 +1574,26 @@
  * Modifies
  *      x8 = fetch code pointer
  */
-.macro mainloop core, step=1, max_r=25, labelc="", labelnc=""
+.macro conv_body core, step=1, max_r=25, labelc="", labelnc=""
+
+            /* If x4 >= x3 then there's no need for clipping.  The main loop
+             * needs to exit when either x3 or x4 runs out, so clamp x4 to be
+             * no greater than x3 and use x4 for the loop.
+             * However, if x4 comes out of the loop with less than 16 bytes
+             * left, a partial read would be necessary to avoid reading beyond
+             * the end of the image.  To avoid this, clamp x4 to the next
+             * multiple of 16, which is still sufficient to force it out of the
+             * loop but doesn't imply a rewind.
+             */
+            add         x12, x3, #15
+            bic         x12, x12, #15
+            cmp         x4, x12
+            csel        x4, x12, x4, hi
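+            /* For example (illustrative): x3=100 rounds up to x12=112, so an
+             * x4 of 500 is clamped to 112 and the loop can then run to
+             * completion in whole 16-byte chunks with no partial read.
+             */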
+
+            /* First calculate the entry-point into the internal fetch logic.
+             * This is done so the same function can service several kernel
+             * sizes.
+             */
             adrp        x8, \labelnc
             add         x8, x8, #:lo12:\labelnc
             sub         x8, x8, x5, LSL #5
@@ -1375,11 +1610,20 @@
             sub         x8, x8, x5, LSL #6
             add         x8, x8, x5, LSL #3
             b           5f
-            .align  4
-3:          fetch max_r=\max_r, labelc=\labelc, labelnc=\labelnc, reg=x8
 
-            /* For each call to fetch two are made to \core.  It would be
-             * preferable to have twice the work done in \core.
+            /* Main loop: ... */
+            .align  4
+3:          /* first perform a vertical convolution from memory to get the next
+             * 16 taps of the horizontal window into the register file...
+             */
+            fetch max_r=\max_r, labelc=\labelc, labelnc=\labelnc, reg=x8
+
+            /* ...then perform a horizontal convolution on that window to
+             * produce eight output bytes, and slide the window along.
+             * This has to be done twice to match the 16-way vertical pass.
+             * It would be preferable to have twice the work done in \core, but
+             * that would demand yet another variant on those macros and would
+             * perturb the register allocation severely.
              */
             \core
             st1         {v15.8b}, [x0], #8
@@ -1388,7 +1632,18 @@
 
             sub         x3, x3, #16
 5:          subs        x4, x4, #16
-            bhs         3b
+            bhi         3b
+            /* Here there's 16 or fewer bytes available before the edge of the
+             * source image.  x4 holds that count minus 16 (because it was
+             * decremented before the first iteration ran).  The last read may
+             * not be a whole chunk, and beyond that a fill value must be used.
+             *
+             * Of course, none of that matters if there's no more output to
+             * produce...
+             */
+            cbz         x3, 5f
+
+            /* Oh well. */
             adds        x4, x4, #16
             bne         1f
   .if \step==1
@@ -1398,35 +1653,17 @@
             dup         v10.2d, v9.d[1]
             dup         v11.2d, v9.d[1]
   .endif
-            b           4f
+            b           3f
 
-1:          sub         x1, x1, #16
-            sub         x15, x15, #16
-            sub         x19, x19, #16
-            add         x1, x1, x4
-            add         x15, x15, x4
-            add         x19, x19, x4
-            bl          fetch_generic_asm
-
-  .if \step==1
-            dup         v12.8h, v11.h[7]
-  .else
-            dup         v12.2d, v11.d[1]
-  .endif
-            sub         x4, xzr, x4
-            tbz         x4, #3, 1f
-            mov         v10.16b, v11.16b
-            mov         v11.16b, v12.16b
-1:          tbz         x4, #2, 1f
-            ext         v10.16b, v10.16b, v11.16b, #4*2
-            ext         v11.16b, v11.16b, v12.16b, #4*2
-1:          tbz         x4, #1, 1f
-            ext         v10.16b, v10.16b, v11.16b, #2*2
-            ext         v11.16b, v11.16b, v12.16b, #2*2
-1:          tbz         x4, #0, 4f
-            ext         v10.16b, v10.16b, v11.16b, #1*2
-            ext         v11.16b, v11.16b, v12.16b, #1*2
-4:          cbz         x3, 5f
+            /* To avoid reading past the end of the input, rewind the pointers
+             * by (16 - x4) to ensure that they're exactly 16 bytes from the
+             * edge.
+             */
+1:          mov         x11, x4
+            bl          fetch_clampright\step
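+            /* For example (illustrative, step=1 variant): if x4 = 5 here,
+             * fetch_clampright1 rewinds the pointers by 11 so that a whole
+             * 16-byte read ends exactly at the image edge, then shifts the
+             * five wanted columns to the front of v10/v11; the remaining
+             * lanes (and v12/v13) hold copies of the last legal pixel.
+             */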
+            /* Now to put this padding to use, perform any remaining
+             * iterations.  This is done at half the rate of the main loop,
+             * because there's no longer pressure from a 16-lane window filler.
+             */
 3:          \core
   .if \step==1
             dup         v11.8h, v11.h[7]
@@ -1436,8 +1673,12 @@
             subs        x3, x3, #8
             blo         4f
             st1         {v15.8b}, [x0], #8
-            beq         5f
-            b           3b
+            bne         3b
+            b           5f
+
+            /* If the final iteration contained 0 < l < 8 values, then perform
+             * a piecewise store of the final vector.
+             */
 4:          tbz         x3, #2, 1f
             st1         {v15.s}[0], [x0], #4
             ext         v15.8b, v15.8b, v15.8b, #4
@@ -1447,16 +1688,17 @@
 1:          tbz         x3, #0, 5f
             st1         {v15.b}[0], [x0], #1
             ext         v15.8b, v15.8b, v15.8b, #1
-5:          nop
+5:          mov         x0, #0
 .endm
 
+
 .irp r, TUNED_LIST1, 25
 PRIVATE(convolve1_\r)
             stp         x29,x30, [sp, #-16]!
 
-            prefetch    step=1, max_r=\r
+            prefill     step=1, max_r=\r, label=.Lcnv1_\r
 
-            mainloop    core=hconv1_\r, step=1, max_r=\r, labelc=.Lcnv1_\r, labelnc=.Lcnvnc1_\r
+            conv_body   core=hconv1_\r, step=1, max_r=\r, labelc=.Lcnv1_\r, labelnc=.Lcnvnc1_\r
 
             ldp         x29,x30, [sp], #16
             ret
@@ -1465,23 +1707,20 @@
 
 .irp r, TUNED_LIST4, 25
 PRIVATE(convolve4_\r)
-            sub         x12, sp, #0x040
-            bic         x9, x12, #0x07f
-            mov         sp, x9
-            stp         x12,x30, [sp, #-16]!
+            sub         x9, sp, #0x40
+            stp         x29,x30, [sp, #-(16 + 0x40 + 0x80)]!
+            bic         x9, x9, #0x7f
 
-            /* x9 now points to a buffer on the stack whose address has the low
-             * 7 bits clear.  This allows easy address calculation in the
-             * wrap-around cases.
+            /* x9 now points to a 0x40 byte buffer on the stack whose address
+             * has the low 7 bits clear.  This allows easy address calculation
+             * in the wrap-around cases.
              */
 
+            prefill     step=4, max_r=\r, label=.Lcnv4_\r
 
-            prefetch    step=4, max_r=\r
+            conv_body   core=hconv4_\r, step=4, max_r=\r, labelc=.Lcnv4_\r, labelnc=.Lcnvnc4_\r
 
-            mainloop    core=hconv4_\r, step=4, max_r=\r, labelc=.Lcnv4_\r, labelnc=.Lcnvnc4_\r
-
-            ldp         x12,x30, [sp]
-            add         sp, x12, #0x40
+            ldp         x29,x30, [sp], #(16 + 0x40 + 0x80)
             ret
 END(convolve4_\r)
 .endr
@@ -1504,34 +1743,25 @@
             sub         sp, sp, #64
             st1         {v8.1d - v11.1d}, [sp]
             st1         {v12.1d - v15.1d}, [x8]
-            mov         x8, x5        // x
-            ldr         w5, [sp,#80]  // r
-            sub         x9, x2, x8
-            sub         x10, x3, x6
-            mov         x2, x4        // pitch
-            mov         x3, x7        // count
-            sub         x7, x10, #1
-            sub         x9, x9, x3
+            mov         x8, x5          // x
+            ldr         w5, [sp,#80]    // r
+            sub         x9, x2, x8      // w - x
+            sub         x10, x3, x6     // h - y
+            mov         x2, x4          // pitch
+            mov         x3, x7          // count
+            sub         x7, x10, #1     // h - y - 1
+            mov         x4, x9          // inlen = (w - x)
 
             ldr         x12, [sp, #88] // tab
 
-            add         x1, x1, x8
+            add         x1, x1, x8      // src += x
 
             cmp         x6, x5
-            csel        x6, x5, x6, hs
+            csel        x6, x5, x6, hs  // rup = min(r, y)
             cmp         x7, x5
-            csel        x7, x5, x7, hs
-            cmp         x8, x5
-            csel        x8, x5, x8, hs
-            cmp         x9, x5
-            csel        x9, x5, x9, hs
+            csel        x7, x5, x7, hs  // rdn = min(r, h - y - 1)
 
-            add         x4, x8, x9
-            add         x4, x4, x3
-
-            sub         x1, x1, x8
-
-            sub         x13, xzr, x2
+            sub         x13, xzr, x2    // -pitch
             msub        x15, x2, x6, x1
             madd        x19, x2, x7, x1
 
@@ -1569,33 +1799,25 @@
             sub         sp, sp, #64
             st1         {v8.1d - v11.1d}, [sp]
             st1         {v12.1d - v15.1d}, [x8]
-            mov         x8, x5        // x
-            ldr         w5, [sp,#80]  // r
-            sub         x9, x2, x8
-            sub         x10, x3, x6
-            mov         x2, x4        // pitch
-            mov         x3, x7        // count
-            sub         x7, x10, #1
-            sub         x9, x9, x3
+            lsl         x8, x5, #2      // x
+            lsl         x2, x2, #2      // w (bytes)
+            ldr         w5, [sp,#80]    // r
+            sub         x9, x2, x8      // w - x
+            sub         x10, x3, x6     // h - y
+            mov         x2, x4          // pitch
+            lsl         x3, x7, #2      // count
+            sub         x7, x10, #1     // h - y - 1
+            mov         x4, x9          // inlen = (w - x)
 
             ldr         x12, [sp, #88]
 
-            add         x1, x1, x8, LSL #2
+            add         x1, x1, x8      // in += x
 
             cmp         x6, x5
-            csel        x6, x5, x6, hs
+            csel        x6, x5, x6, hs  // rup = min(r, y)
             cmp         x7, x5
-            csel        x7, x5, x7, hs
-            cmp         x8, x5
-            csel        x8, x5, x8, hs
-            cmp         x9, x5
-            csel        x9, x5, x9, hs
+            csel        x7, x5, x7, hs  // rdn = min(r, h - y - 1)
 
-            lsl         x3, x3, #2
-            add         x4, x8, x9
-            add         x4, x3, x4, LSL #2
-
-            sub         x1, x1, x8, LSL #2
 
             sub         x13, xzr, x2
             msub        x15, x2, x6, x1
diff --git a/cpu_ref/rsCpuIntrinsics_neon_Blur.S b/cpu_ref/rsCpuIntrinsics_neon_Blur.S
index a6479cb..5df98ce 100644
--- a/cpu_ref/rsCpuIntrinsics_neon_Blur.S
+++ b/cpu_ref/rsCpuIntrinsics_neon_Blur.S
@@ -68,8 +68,8 @@
  *      r1 -- src
  *      r2 -- pitch
  *      r5 -- r
- *      r6 -- rup
- *      r7 -- rdn
+ *      r6 -- rup (r, unless clipped to top of source image)
+ *      r7 -- rdn (r, unless clipped to bottom of source image)
  *      r12 -- switch index
  *      q0-q3 -- coefficient table
  * Output:
@@ -1155,33 +1155,142 @@
             bx          lr
 END(fetch_generic_asm)
 
-/* Given values in q10 and q11, and an index in r11, sweep the (r11&15)th value
- * across to fill the rest of the register pair.  Used for filling the right
- * hand edge of the window when starting too close to the right hand edge of
- * the image.
+
+/* Fetch the next (16 - (r10 & 15)) columns of data, avoiding reading memory
+ * beyond that limit, and filling the rest of the vector with the last legal
+ * pixel.
+ * Result is in q10 and q11.  q8 and q9 are filled with the first legal pixel.
+ * Note: This function can read beyond the right edge of input if the image is
+ * narrower than 16 bytes.
+ */
+PRIVATE(fetch_clampleft1)
+            push        {r12,lr}
+            bl          fetch_generic_asm
+            vdup.u16    q8, d20[0]
+            vdup.u16    q9, d20[0]
+            ands        r12, r10, #15
+            beq         1f
+            sub         r1, r1, r12
+            sub         r10, r10, r12
+            sub         sp, sp, #32
+            vst1.u16    {q10,q11}, [sp]
+            sub         r12, sp, r12, LSL #1
+            sub         sp, sp, #32
+            vst1.u16    {q8,q9}, [sp]
+            vld1.u16    {q10,q11}, [r12]
+            add         sp, sp, #64
+1:          pop         {r12,pc}
+END(fetch_clampleft1)
+
+PRIVATE(fetch_clampleft4)
+            push        {r12,lr}
+            bl          fetch_generic_asm
+            vmov.u16    d16, d20
+            vmov.u16    d17, d20
+            vmov.u16    d18, d20
+            vmov.u16    d19, d20
+            ands        r12, r10, #15
+            beq         1f
+            sub         r1, r1, r12
+            sub         r10, r10, r12
+            sub         sp, sp, #32
+            vst1.u16    {q10-q11}, [sp]
+            sub         r12, sp, r12, LSL #1
+            sub         sp, sp, #32
+            vst1.u16    {q8,q9}, [sp]
+            vld1.u16    {q10,q11}, [r12]
+            add         sp, sp, #64
+1:          pop         {r12,pc}
+END(fetch_clampleft4)
+
+/* Fetch only the next (r11 & 15) (where 0 means 16) columns of data, avoiding
+ * reading memory beyond that limit, and filling the rest of the vector with
+ * the last legal pixel.
+ * Result is in q10 and q11.  q12 and q13 are filled with the last legal pixel.
+ * Note: This function can read beyond the left edge of input if the image is
+ * narrower than 16 bytes.
+ */
+PRIVATE(fetch_clampright1)
+            push        {r12, lr}
+            rsb         r12, r11, #0
+            ands        r12, r12, #15
+            beq         1f
+            sub         r1, r1, r12
+            bl          fetch_generic_asm
+            vdup.u16    q12, d23[3]
+            vdup.u16    q13, d23[3]
+            rsb         r12, r11, #0
+            and         r12, r12, #15
+            sub         sp, sp, #32
+            vst1.u16    {q12,q13}, [sp]
+            sub         sp, sp, #32
+            add         r12, sp, r12, LSL #1
+            vst1.u16    {q10,q11}, [sp]
+            vld1.u16    {q10,q11}, [r12]
+            add         sp, sp, #64
+            pop         {r12,pc}
+1:          bl          fetch_generic_asm
+            vdup.u16    q12, d23[3]
+            vdup.u16    q13, d23[3]
+            pop         {r12,pc}
+END(fetch_clampright1)
+
+PRIVATE(fetch_clampright4)
+            push        {r12, lr}
+            rsb         r12, r11, #0
+            ands        r12, r12, #15
+            beq         1f
+            sub         r1, r1, r12
+            bl          fetch_generic_asm
+            vmov.u16    d24, d23
+            vmov.u16    d25, d23
+            vmov.u16    d26, d23
+            vmov.u16    d27, d23
+            rsb         r12, r11, #0
+            and         r12, r12, #15
+            sub         sp, sp, #32
+            vst1.u16    {q12-q13}, [sp]
+            sub         sp, sp, #32
+            add         r12, sp, r12, LSL #1
+            vst1.u16    {q10,q11}, [sp]
+            vld1.u16    {q10,q11}, [r12]
+            add         sp, sp, #64
+            pop         {r12,pc}
+1:          bl          fetch_generic_asm
+            vmov.u16    d24, d23
+            vmov.u16    d25, d23
+            vmov.u16    d26, d23
+            vmov.u16    d27, d23
+            pop         {r12,pc}
+END(fetch_clampright4)
+
+/* Given values in q10 and q11, and an index in r11, sweep the (r11 & 15)th
+ * value across to fill the rest of the register pair.  Used for filling the
+ * right hand edge of the window when reading too close to the right hand edge
+ * of the image.
  * Also returns a dup-ed copy of the last element in q12 for the tail-fill
  * case (this happens incidentally in common path, but must be done
  * deliberately in the fast-out path).
  */
-PRIVATE(prefetch_clampright1)
+PRIVATE(prefill_sweepright1)
             ands        r12, r11, #15
             beq         1f
             sub         r12, r12, #1
             sub         sp, sp, #64
             vst1.u16    {q10,q11}, [sp]
             add         r12, sp, r12, LSL #1
-            vld1.u16    {d24[]}, [r12]
-            vld1.u16    {d25[]}, [r12]
-            vst1.u16    {q12}, [r12]!
-            vst1.u16    {q12}, [r12]
+            vld1.u16    {d24[],d25[]}, [r12]
+            vld1.u16    {d26[],d27[]}, [r12]
+            vst1.u16    {q12,q13}, [r12]
             vld1.u16    {q10,q11}, [sp]
             add         sp, sp, #64
             bx          lr
 1:          vdup.u16    q12, d23[3]
+            vdup.u16    q13, d23[3]
             bx          lr
-END(prefetch_clampright1)
+END(prefill_sweepright1)
 
-PRIVATE(prefetch_clampright4)
+PRIVATE(prefill_sweepright4)
             ands        r12, r11, #15
             beq         1f
             sub         r12, r12, #4
@@ -1190,156 +1299,262 @@
             add         r12, sp, r12, LSL #1
             vld1.u64    {d24}, [r12]
             vld1.u64    {d25}, [r12]
-            vst1.u16    {q12}, [r12]!
-            vst1.u16    {q12}, [r12]
+            vld1.u64    {d26}, [r12]
+            vld1.u64    {d27}, [r12]
+            vst1.u16    {q12,q13}, [r12]
             vld1.u16    {q10,q11}, [sp]
             add         sp, sp, #64
             bx          lr
 1:          vmov.u16    d24, d23
             vmov.u16    d25, d23
+            vmov.u16    d26, d23
+            vmov.u16    d27, d23
             bx          lr
-END(prefetch_clampright4)
+END(prefill_sweepright4)
 
-
-/* Helpers for prefetch, below.
+/* The main loop keeps a sliding window of data that has already been convolved
+ * in the vertical axis for the current line.  This usually stays in the
+ * register file, but spills to memory for large windows.  The first thing that
+ * needs to be done at start-up is to fill this window with image data, taking
+ * into account the padding needed if the left or right edges of the image fall
+ * within this window.
  */
-.macro prefetch_out qa, qb, store, qsa, qsb, qsb_hi
-  .if \store > 0
-    .ifc \qsa,\qsb
-            vst1.u16    {\qsa}, [r9:128]!
-            vst1.u16    {\qsb}, [r9:128]!
+
+/* Because the window is in the register file, writes to it cannot be indexed
+ * by another register.  Consequently the fill loops are unrolled to address
+ * the registers directly.  This macro distinguishes between writes to the
+ * register file and writes to the spill buffer (indicated by a destination
+ * register named xx).
+ */
+.macro prefill_out ra, rb, sra, srb, srb_hi
+  .ifc \ra,xx
+    .ifc \rb,xx
+            vst1.u16    {\sra,\srb}, [r9:128]!
     .else
-            vst1.u16    {\qsa,\qsb}, [r9:256]!
+            /* this case is used only for the last tap of uchar1 r=25 */
+            /* discard \sra */
+            vmov.u16    \rb, \srb_hi
     .endif
-  .elseif \store == 0
-            vmov.u16    \qa, \qsa
-            vmov.u16    \qb, \qsb
   .else
-            vmov.u16    \qb, \qsb_hi
+    .ifnc \ra,\sra
+            vmov.u16    \ra, \sra
+    .endif
+    .ifnc \rb,\srb
+            vmov.u16    \rb, \srb
+    .endif
   .endif
 .endm
 
-.macro prefetch_one  qa, qb, rem, c, store=0, step=1
-.set i, (need - 16) - \rem
-.if i >= 0
-1:          cmp         r10, #i+16
-            blo         2f
-            prefetch_out \qa, \qb, \store, q9, q9, d19
-            b           1f
-2:          cmp         r11, #i+16
-            bls         3f
-            prefetch_out \qa, \qb, \store, q10, q11, d23
-            bl          fetch_generic_asm
-            b           2f
-3:          bl          prefetch_clampright\step
-            prefetch_out \qa, \qb, \store, q10, q11, d23
-4:          b           4f+4
-            @q12 contains pad word from prefetch_clampright call
-            prefetch_out \qa, \qb, \store, q12, q12, d25
-  .if \rem > 0
-            b           4f+4
-  .else
-1:
-2:
-3:
-4:          nop
-  .endif
-.endif
-.endm
-
-/* Fill the convolution window with context data.  The aim here is to load
- * exactly rlf + rrt columns, and in the main loop to read as many columns as
- * will be written.  This is complicated by the need to handle cases when the
- * input starts very close to the left or right (or both) edges of the image,
- * and where these do not fall on 16-byte boundaries.
- *
- * Input:
- *      r1 -- src
- *      r2 -- pitch
- *      r3 -- count
- *      r4 -- inlen
- *      r5 -- r
- *      r6 -- rup
- *      r7 -- rdn
- *      r8 -- rlf
- *      r9 -- buffer (if needed)
- * Output:
- *      r1 += rlf + min(count, rrt)
- * Modifies:
- *      r10 -- fill start index in the window
- *      r11 -- fill stop index in the window
- *      r12 -- scratch
+/* This macro provides the list of registers representing the window, and the
+ * cases where the register file is too small and a spill buffer is used
+ * instead.
+ * Since several specialisations of each function are generated, this also
+ * culls superfluous iterations, and sets the variable `i` for subsequent
+ * macros indicating the current index into the window.
  */
-.macro prefetch step=1, max_r=25
-.set need, ((\max_r + \max_r) * \step + 15) & ~15
-  .if \step == 1
-            rsb         r10, r8, #need - (\max_r * \step)
-  .else
-            mov         r10, r8, LSL #2
-            rsb         r10, r10, #need - (\max_r * \step)
-  .endif
-            add         r11, r10, r4
-            cmp         r11, #need
-            movhi       r11, #need
-
-            bl          fetch_generic_asm
-  .if \step == 1
-            vdup.u16    q9, d20[0]
-  .else
-            vmov.u16    d18, d20
-            vmov.u16    d19, d20
-  .endif
-            ands        r12, r10, #15
-            beq         2f
-            sub         sp, sp, #32
-            vst1.u16    {q10,q11}, [sp]
-            sub         r12, sp, r12, LSL #1
-            sub         sp, sp, #16
-            vst1.u16    {q9}, [sp]
-            sub         sp, sp, #16
-            vst1.u16    {q9}, [sp]
-            vld1.u16    {q10,q11}, [r12]
-            add         sp, sp, #64
-            sub         r1, r1, r10
-            bic         r10, r10, #15
-            add         r1, r1, r10
-2:
+.macro prefill_list, macro, nextmacro, max_r, step, label
+  .macro ifneeded macro, nextmacro, line, nextline, ra, rb, step, label
+    .if windowsize >= (\line * 16)
+      .set i, windowsize - (\line * 16)
+\label\macro\line:
+            prefill_\macro \label\nextmacro\line, \label\nextmacro\nextline, \ra, \rb, \step
+    .endif
+  .endm
   .if \step > 1
-            /* it's only in the uchar2 and uchar4 cases where the register file
-             * is insufficient (given MAX_R <= 25).
-             */
-            prefetch_one xx, xx, 192, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx, 176, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx, 160, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx, 144, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx, 128, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx, 112, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx,  96, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx,  80, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx,  64, c=\max_r, step=\step, store=1
-            prefetch_one xx, xx,  48, c=\max_r, step=\step, store=1
+            ifneeded \macro \nextmacro, 13, 12, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro, 12, 11, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro, 11, 10, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro, 10,  9, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro,  9,  8, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro,  8,  7, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro,  7,  6, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro,  6,  5, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro,  5,  4, xx, xx,  \step, \label
+            ifneeded \macro \nextmacro,  4,  3, xx, xx,  \step, \label
   .else
             /* q3 normally contains the coefficient table, but it's not fully
              * used.  In the uchar1, r=25 case the other half of q3 is used for
              * the last two window taps to avoid falling out to memory.
              */
-            prefetch_one xx, d7,  48, c=\max_r, step=\step, store=-1
+            ifneeded \macro \nextmacro,  4,  3, xx, d7,   \step, \label
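+            /* (Illustrative: for step=1, max_r=25, line 4 is window index
+             * i=0, of which only the last two taps are real data; they are
+             * kept in d7, the other half of q3, via the \srb_hi argument of
+             * prefill_out above.) */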
   .endif
-            prefetch_one q4, q5,  32, c=\max_r, step=\step, store=0
-            prefetch_one q6, q7,  16, c=\max_r, step=\step, store=0
-            prefetch_one q8, q9,   0, c=\max_r, step=\step, store=0
+            ifneeded \macro \nextmacro,  3,  2, q4, q5,   \step, \label
+            ifneeded \macro \nextmacro,  2,  1, q6, q7,   \step, \label
+            ifneeded \macro \nextmacro,  1,  0, q8, q9,   \step, \label
 
-  .if \step == 1
-            add         r10, r8, #\max_r * \step
-  .else
-            mov         r10, r8, LSL #2
-            add         r10, r10, #\max_r * \step
-  .endif
-            subs        r4, r4, r10
-            movlo       r4, #0
+\label\macro\()0:
+            b           \label\()_end
+  .purgem ifneeded
 .endm
 
-/* The main loop.
+/* These macros represent the possible stages of filling the window.
+ * Each macro is unrolled enough times that it can fill the entire window
+ * itself, but normally it will have to hand control to subsequent macros
+ * part-way through, and this is done using labels named \next and \after, where
+ * \next is the next macro starting at the same window position and \after is
+ * the next macro starting after the current window position.
+ */
+
+/* leftfill: q8 and q9 contain the left padding value.  While the window
+ * extends outside of the image on the left-hand side, and at least 16 more
+ * padding values are needed in the window, store q8 and q9 into the window.
+ * Otherwise skip forward to storing image data.
+ */
+.macro prefill_leftfill, next, after, ra, rb, step
+            cmp         r10, #i+16
+            blo         \next
+            prefill_out \ra, \rb, q8, q9, d19
+.endm
+
+/* leftedge: The very first non-fill or partial-fill chunk from the image is
+ * already loaded (as it was used to calculate the left padding value), so
+ * store it here, and then drop into the regular load/store cycle in the next
+ * macro.
+ */
+.macro prefill_leftedge, next, after, ra, rb, step
+1:          prefill_out \ra, \rb, q10, q11, d23
+            b           \after
+.endm
+
+/* dofetch: Copy chunks of the image into the window without any complications
+ * from edge conditions.
+ */
+.macro prefill_dofetch, next, after, ra, rb, step
+            cmp         r11, #i+16
+            bls         \next
+            bl          fetch_generic_asm
+            prefill_out \ra, \rb, q10, q11, d23
+.endm
+
+/* rightedge: The last fetch (currently in q10 and q11) may have gone beyond
+ * the right-hand edge of the image.  In that case sweep the last valid pixel
+ * across the rest of the chunk, and in either case prepare padding data in
+ * q12 and q13 for the next macro.  This is done in fetch_clampright.
+ * This only happens once before going on to the next macro.
+ * Sometimes leftedge also covers the rightedge case, in which case this has
+ * to be skipped altogether.
+ */
+.macro prefill_rightedge, next, after, ra, rb, step
+            cmp         r11, #i
+            bls         \next
+            bl          fetch_clampright\step
+            prefill_out \ra, \rb, q10, q11, d23
+            b           \after
+.endm
+
+/* rightfill: The rest of the window is simply filled with right padding from
+ * q12 and q13.
+ */
+.macro prefill_rightfill, next, after, ra, rb, step
+            prefill_out \ra, \rb, q12, q13, d25
+.endm
+
+/* Here all of the macros above are unrolled and laid out in the proper order.
+ */
+.macro prefill_body, max_r, step, label
+            prefill_list leftfill,  leftedge,   \max_r, \step, \label
+            prefill_list leftedge,  dofetch,    \max_r, \step, \label
+            prefill_list dofetch,   rightedge,  \max_r, \step, \label
+            prefill_list rightedge, rightfill,  \max_r, \step, \label
+            prefill_list rightfill, oops,       \max_r, \step, \label
+\label\()_end:
+.endm
+
+/* Fill the convolution window with context data.  The aim here is to load
+ * exactly 2*r columns, and in the main loop to read as many columns as will be
+ * written.  This is complicated by the window being divided into chunks at
+ * register boundaries, and the need to handle cases when the input starts very
+ * close to the left or right (or both) edges of the image, and by the need to
+ * fill the gaps that this leaves with left and right edge padding values.
+ *
+ * Input:
+ *      r1 -- src
+ *      r2 -- pitch
+ *      r3 -- count
+ *      r4 -- available image data right of src pointer
+ *      r5 -- r
+ *      r6 -- rup
+ *      r7 -- rdn
+ *      r8 -- available image data left of src pointer
+ *      r9 -- buffer (if needed)
+ * Output:
+ *      r4 -= min(inlen, count + windowsize - centertap)
+ *      r1 += min(inlen, count + windowsize - centertap)
+ * Modifies:
+ *      r10 -- fill start index in the window
+ *      r11 -- fill stop index in the window
+ *      r12 -- scratch
+ */
+.macro prefill step=1, max_r=25, label=xx
+.set windowsize, (((\max_r + \max_r) * \step + 15) & ~15)
+.set centertap, (windowsize - \max_r * \step)
+            mov         r10, #centertap
+            subs        r10, r10, r8
+            movlo       r10, #0
+
+            subs        r11, r4, #windowsize - centertap
+            movhs       r11, #0
+            add         r11, r11, #windowsize
+
+            /* r10 indicates where in the window legal image data begins.
+             * r11 indicates where in the window legal image data ends.
+             * When starting near the centre of a large image these would be
+             * zero and windowsize respectively, but when starting near the
+             * edges this can change.
+             * When starting on the leftmost pixel, r10 will be centertap.
+             * When starting on the rightmost pixel, r11 will be centertap+1.
+             */
+
+            /* r4 indicates how much data there is between the current pointers
+             * and the right edge of the image.  The pointers currently point
+             * to the data needed at centertap.  The subsequent code will
+             * consume (windowsize - r10) data, but only the data from
+             * centertap to windowsize comes out of r4's budget.
+             */
+1:          subs        r4, r4, #windowsize - centertap
+            movlo       r4, #0
+
+            /* And the pointers need to rewind to the start of the window.
+             */
+            sub         r1, r1, #centertap
+
+            /* Unless r8 indicated that there wasn't that much data available.
+             */
+            add         r1, r1, r10
+
+
+            /* Get the first chunk, and add padding to align it to the window
+             * if necessary.
+             */
+            bl          fetch_clampleft\step
+
+            /* Sometimes the start and the end of the window are in the same
+             * chunk.  In that case both ends need filler at the outset.
+             */
+            sub         r12, r11, #1
+            eor         r12,  r10, r12
+            cmp         r12, #16
+            bllo        prefill_sweepright\step
+
+            /* Iterate through all the points in the window and fill them in
+             * with padding or image data as needed.
+             */
+            prefill_body \max_r, \step, \label
+.endm
+
+/* The main body of the convolve functions.  Having already pre-filled the
+ * convolution window with 2*r input values, the logic settles into a regular
+ * pattern of reading and writing at a 1:1 rate until either input or output
+ * expires.  The input leads the output by r values, so when processing all the
+ * way to the right-hand edge, or within r pixels of that edge, the input will
+ * run out first.  In the case of very narrow images, or sub-windows starting
+ * near the right edge, the input may already have run out while the
+ * convolution window was being filled and this loop will start with a
+ * zero-length input.
+ *
+ * Once the input runs out, the rest of the output must be produced by padding
+ * the remainder of the window with the pad value taken from the last valid
+ * pixel of the source.
  *
  * Input:
  *      r0 = dst
@@ -1354,7 +1569,26 @@
  * Modifies
  *      r8 = fetch code pointer
  */
-.macro mainloop core, step=1, max_r=25, labelc="", labelnc=""
+.macro conv_body core, step=1, max_r=25, labelc="", labelnc=""
+
+            /* If r4 >= r3 then there's no need for clipping.  The main loop
+             * needs to exit when either r3 or r4 runs out, so clamp r4 to be
+             * no greater than r3 and use r4 for the loop.
+             * However, if r4 comes out of the loop with less than 16 bytes
+             * left, a partial read would be necessary to avoid reading beyond
+             * the end of the image.  To avoid this, clamp r4 to the next
+             * multiple of 16, which is still sufficient to force it out of the
+             * loop but doesn't imply a rewind.
+             */
+            add         r12, r3, #15
+            bic         r12, r12, #15
+            cmp         r4, r12
+            movhi       r4, r12
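+            /* i.e. roughly: r4 = min(r4, (r3 + 15) & ~15) -- a 16-aligned
+             * clamp, as described above.
+             */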
+
+            /* First calculate the entry-point into the internal fetch logic.
+             * This is done so the same function can service several kernel
+             * sizes.
+             */
             ldr         r8, 3f
 1:          add         r8, r8, pc
             sub         r8, r5, LSL #5
@@ -1373,12 +1607,20 @@
             .align 3
 3:          .word       \labelnc-1b-8
             .word       \labelc-2b-8
-            .align 4
-3:          fetch max_r=\max_r, labelc=\labelc, labelnc=\labelnc, reg=r8
 
-            /* For each call to fetch two are made to \core.  It would be
-             * preferable to have twice the work done in \core, but the
-             * register file is too small for this to be straightforward.
+            /* Main loop: ... */
+            .align 4
+3:          /* first perform a vertical convolution from memory to get the next
+             * 16 taps of the horizontal window into the register file...
+             */
+            fetch max_r=\max_r, labelc=\labelc, labelnc=\labelnc, reg=r8
+
+            /* ...then perform a horizontal convolution on that window to
+             * produce eight output bytes, and slide the window along.
+             * This has to be done twice to match the 16-way vertical pass.
+             * It would be preferable to have twice the work done in \core, but
+             * that would demand yet another variant on those macros and would
+             * perturb the register allocation severely.
              */
             \core
             vst1.u8     {d31}, [r0]!
@@ -1387,7 +1629,19 @@
 
             sub         r3, r3, #16
 5:          subs        r4, r4, #16
-            bhs         3b
+            bhi         3b
+            /* Here there are 16 or fewer bytes available before the edge of
+             * the source image.  r4 holds that count minus 16 (because it was
+             * decremented before the first iteration ran).  The last read may
+             * not be a whole chunk, and beyond that a fill value must be used.
+             *
+             * Of course, none of that matters if there's no more output to
+             * produce...
+             */
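+            /* (Viewed as signed arithmetic: bytes_left = r4 + 16, which now
+             * lies somewhere in [0, 16].)
+             */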
+            cmp         r3, #0
+            beq         5f
+
+            /* Oh well. */
             adds        r4, r4, #16
             bne         1f
   .if \step==1
@@ -1399,37 +1653,17 @@
             vmov.u64    d22, d19
             vmov.u64    d23, d19
   .endif
-            b           4f
+            b           3f
 
-1:          sub         r1, r1, #16
-            add         r1, r1, r4
-            bl          fetch_generic_asm
-
-  .if \step==1
-            vdup.u16    q12, d23[3]
-  .else
-            vmov.u64    d24, d23
-            vmov.u64    d25, d23
-  .endif
-            rsb         r4, r4, #0
-            tst         r4, #8
-            beq         1f
-            vmov        q10, q11
-            vmov        q11, q12
-1:          tst         r4, #4
-            beq         1f
-            vext.u16    q10, q10, q11, #4
-            vext.u16    q11, q11, q12, #4
-1:          tst         r4, #2
-            beq         1f
-            vext.u16    q10, q10, q11, #2
-            vext.u16    q11, q11, q12, #2
-1:          tst         r4, #1
-            beq         4f
-            vext.u16    q10, q10, q11, #1
-            vext.u16    q11, q11, q12, #1
-4:          cmp         r3, #0
-            beq         5f
+            /* To avoid reading past the end of the input, rewind the pointers
+             * by (16 - r4) so that they sit exactly 16 bytes from the edge.
+             */
+1:          mov         r11, r4
+            bl          fetch_clampright\step
+            /* Now to put this padding to use, perform any remaining
+             * iterations.  This is done at half the rate of the main loop,
+             * because there's no longer pressure from a 16-lane window filler.
+             */
 3:          \core
   .if \step==1
             vdup.u16    q11, d23[3]
@@ -1439,8 +1673,12 @@
             subs        r3, r3, #8
             blo         4f
             vst1.u8     {d31}, [r0]!
-            beq         5f
-            b           3b
+            bne         3b
+            b           5f
+
+            /* If the final iteration contained 0 < l < 8 values, then perform
+             * a piecewise store of the final vector.
+             */
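+            /* In outline (illustrative): if (l & 4) store 4 bytes, if (l & 2)
+             * store 2, if (l & 1) store 1, rotating d31 down after each store
+             * so the next bytes to write are always in lane 0.
+             */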
 4:          tst         r3, #4
             beq         1f
             vst1.u32    {d31[0]}, [r0]!
@@ -1453,18 +1691,16 @@
             beq         5f
             vst1.u8     {d31[0]}, [r0]!
             vext.u8     d31, d31, d31, #1
-5:          nop
+5:          mov         r0, #0
 .endm
 
 .irp r, TUNED_LIST1, 25
 PRIVATE(convolve1_\r)
             push        {r12,lr}
 
-            sub         r1, r1, r8
+            prefill     step=1, max_r=\r, label=.Lcnv1_\r
 
-            prefetch    step=1, max_r=\r
-
-            mainloop    core=hconv1_\r, step=1, max_r=\r, labelc=.Lcnv1_\r, labelnc=.Lcnvnc1_\r
+            conv_body   core=hconv1_\r, step=1, max_r=\r, labelc=.Lcnv1_\r, labelnc=.Lcnvnc1_\r
 
             pop         {r12,pc}
 END(convolve1_\r)
@@ -1472,25 +1708,22 @@
 
 .irp r, TUNED_LIST4, 25
 PRIVATE(convolve4_\r)
-            sub         r12, sp, #0x200
-            bic         r9, r12, #0x3fc
-            mov         sp, r9
             push        {r12,lr}
+            sub         r9, sp, #0x200
+            sub         sp, sp, #0x200 + 0x400
+            bic         r9, r9, #0x3fc
 
-            /* r9 now points to a buffer on the stack whose address has the low
-             * 10 bits clear.  This allows easy address calculation in the
-             * wrap-around cases.
+            /* r9 now points to a 0x200 byte buffer on the stack whose address
+             * has the low 10 bits clear.  This allows easy address calculation
+             * in the wrap-around cases.
              */
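+            /* Illustratively: r9 = (sp - 0x200) & ~0x3fc; with sp 8-byte
+             * aligned this leaves the low 10 bits of r9 clear.
+             */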
 
-            sub         r1, r1, r8, LSL #2
+            prefill     step=4, max_r=\r, label=.Lcnv4_\r
 
-            prefetch    step=4, max_r=\r
+            conv_body   core=hconv4_\r, step=4, max_r=\r, labelc=.Lcnv4_\r, labelnc=.Lcnvnc4_\r
 
-            mainloop    core=hconv4_\r, step=4, max_r=\r, labelc=.Lcnv4_\r, labelnc=.Lcnvnc4_\r
-
-            pop         {r12,lr}
-            add         sp, r12, #0x200
-            bx          lr
+            add         sp, sp, #0x200 + 0x400
+            pop         {r12,pc}
 END(convolve4_\r)
 .endr
 
@@ -1509,31 +1742,23 @@
 ENTRY(rsdIntrinsicBlurU1_K)
             push        {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
             vpush       {d8-d15}
-            ldr         r5, [sp,#120]
-            ldr         r8, [sp,#108]
-            ldr         r6, [sp,#112]
-            sub         r9, r2, r8
-            sub         r7, r3, r6
-            ldr         r2, [sp,#104]
-            ldr         r3, [sp,#116]
-            sub         r9, r9, r3
-            sub         r7, r7, #1
+            ldr         r6, [sp,#112]   // y
+            ldr         r8, [sp,#108]   // x
+            ldr         r5, [sp,#120]   // r
+            sub         r4, r2, r8      // inlen = w - x
+            sub         r7, r3, r6      // h - y
+            ldr         r2, [sp,#104]   // pitch
+            ldr         r3, [sp,#116]   // count
+            sub         r7, r7, #1      // h - y - 1
 
             ldr         r12, [sp,#124]
 
-            add         r1, r1, r8
+            add         r1, r1, r8      // src += x
 
             cmp         r6, r5
-            movhi       r6, r5
+            movhi       r6, r5          // rup = min(r, y)
             cmp         r7, r5
-            movhi       r7, r5
-            cmp         r8, r5
-            movhi       r8, r5
-            cmp         r9, r5
-            movhi       r9, r5
-
-            add         r4, r8, r9
-            add         r4, r4, r3
+            movhi       r7, r5          // rdn = min(r, h - y - 1)
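+
+            /* Net effect so far, in C terms (names illustrative):
+             *     src += x;  inlen = w - x;
+             *     rup = min(r, y);  rdn = min(r, h - y - 1);
+             */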
 
             vld1.u16    {d0,d1,d2,d3}, [r12]!
             vld1.u16    {d4,d5,d6}, [r12]!
@@ -1564,32 +1789,25 @@
 ENTRY(rsdIntrinsicBlurU4_K)
             push        {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
             vpush       {d8-d15}
-            ldr         r5, [sp,#120]
-            ldr         r8, [sp,#108]
-            ldr         r6, [sp,#112]
-            sub         r9, r2, r8
-            sub         r7, r3, r6
-            ldr         r2, [sp,#104]
-            ldr         r3, [sp,#116]
-            sub         r9, r9, r3
-            sub         r7, r7, #1
+            ldr         r6, [sp,#112]   // y
+            ldr         r8, [sp,#108]   // x
+            ldr         r5, [sp,#120]   // r
+            lsl         r8, r8, #2      // x *= 4 (uchar4 pixels)
+            rsb         r4, r8, r2, LSL #2 // inlen = (w - x) * 4
+            sub         r7, r3, r6      // h - y
+            ldr         r2, [sp,#104]   // pitch
+            ldr         r3, [sp,#116]   // count
+            sub         r7, r7, #1      // h - y - 1
+            lsl         r3, r3, #2      // count *= 4 (bytes)
 
             ldr         r12, [sp,#124]
 
-            add         r1, r1, r8, LSL #2
+            add         r1, r1, r8      // src += x * 4
 
             cmp         r6, r5
-            movhi       r6, r5
+            movhi       r6, r5          // rup = min(r, y)
             cmp         r7, r5
-            movhi       r7, r5
-            cmp         r8, r5
-            movhi       r8, r5
-            cmp         r9, r5
-            movhi       r9, r5
-
-            mov         r3, r3, LSL #2
-            add         r4, r8, r9
-            add         r4, r3, r4, LSL #2
+            movhi       r7, r5          // rdn = min(r, h - y - 1)
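+
+            /* Net effect so far, in C terms (names illustrative); everything
+             * scales by 4 because each pixel is a uchar4:
+             *     src += x * 4;  inlen = (w - x) * 4;  count *= 4;
+             *     rup = min(r, y);  rdn = min(r, h - y - 1);
+             */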
 
             vld1.u16    {d0,d1,d2,d3}, [r12]!
             vld1.u16    {d4,d5,d6}, [r12]!